]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - opcodes/aarch64-dis.c
aarch64: Add support for Armv8-R DFB alias
[thirdparty/binutils-gdb.git] / opcodes / aarch64-dis.c
1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2020 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define INSNLEN 4
30
31 /* Cached mapping symbol state. */
32 enum map_type
33 {
34 MAP_INSN,
35 MAP_DATA
36 };
37
38 static aarch64_feature_set arch_variant; /* See select_aarch64_variant. */
39 static enum map_type last_type;
40 static int last_mapping_sym = -1;
41 static bfd_vma last_stop_offset = 0;
42 static bfd_vma last_mapping_addr = 0;
43
44 /* Other options */
45 static int no_aliases = 0; /* If set disassemble as most general inst. */
46 \fstatic int no_notes = 1; /* If set do not print disassemble notes in the
47 output as comments. */
48
49 /* Currently active instruction sequence. */
50 static aarch64_instr_sequence insn_sequence;
51
52 static void
53 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
54 {
55 }
56
57 static void
58 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
59 {
60 /* Try to match options that are simple flags */
61 if (CONST_STRNEQ (option, "no-aliases"))
62 {
63 no_aliases = 1;
64 return;
65 }
66
67 if (CONST_STRNEQ (option, "aliases"))
68 {
69 no_aliases = 0;
70 return;
71 }
72
73 if (CONST_STRNEQ (option, "no-notes"))
74 {
75 no_notes = 1;
76 return;
77 }
78
79 if (CONST_STRNEQ (option, "notes"))
80 {
81 no_notes = 0;
82 return;
83 }
84
85 #ifdef DEBUG_AARCH64
86 if (CONST_STRNEQ (option, "debug_dump"))
87 {
88 debug_dump = 1;
89 return;
90 }
91 #endif /* DEBUG_AARCH64 */
92
93 /* Invalid option. */
94 opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
95 }
96
97 static void
98 parse_aarch64_dis_options (const char *options)
99 {
100 const char *option_end;
101
102 if (options == NULL)
103 return;
104
105 while (*options != '\0')
106 {
107 /* Skip empty options. */
108 if (*options == ',')
109 {
110 options++;
111 continue;
112 }
113
114 /* We know that *options is neither NUL or a comma. */
115 option_end = options + 1;
116 while (*option_end != ',' && *option_end != '\0')
117 option_end++;
118
119 parse_aarch64_dis_option (options, option_end - options);
120
121 /* Go on to the next one. If option_end points to a comma, it
122 will be skipped above. */
123 options = option_end;
124 }
125 }
126 \f
127 /* Functions doing the instruction disassembling. */
128
129 /* The unnamed arguments consist of the number of fields and information about
130 these fields where the VALUE will be extracted from CODE and returned.
131 MASK can be zero or the base mask of the opcode.
132
133 N.B. the fields are required to be in such an order than the most signficant
134 field for VALUE comes the first, e.g. the <index> in
135 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
136 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
137 the order of H, L, M. */
138
139 aarch64_insn
140 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
141 {
142 uint32_t num;
143 const aarch64_field *field;
144 enum aarch64_field_kind kind;
145 va_list va;
146
147 va_start (va, mask);
148 num = va_arg (va, uint32_t);
149 assert (num <= 5);
150 aarch64_insn value = 0x0;
151 while (num--)
152 {
153 kind = va_arg (va, enum aarch64_field_kind);
154 field = &fields[kind];
155 value <<= field->width;
156 value |= extract_field (kind, code, mask);
157 }
158 return value;
159 }
160
161 /* Extract the value of all fields in SELF->fields from instruction CODE.
162 The least significant bit comes from the final field. */
163
164 static aarch64_insn
165 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
166 {
167 aarch64_insn value;
168 unsigned int i;
169 enum aarch64_field_kind kind;
170
171 value = 0;
172 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
173 {
174 kind = self->fields[i];
175 value <<= fields[kind].width;
176 value |= extract_field (kind, code, 0);
177 }
178 return value;
179 }
180
181 /* Sign-extend bit I of VALUE. */
182 static inline uint64_t
183 sign_extend (aarch64_insn value, unsigned i)
184 {
185 uint64_t ret, sign;
186
187 assert (i < 32);
188 ret = value;
189 sign = (uint64_t) 1 << i;
190 return ((ret & (sign + sign - 1)) ^ sign) - sign;
191 }
192
193 /* N.B. the following inline helpfer functions create a dependency on the
194 order of operand qualifier enumerators. */
195
196 /* Given VALUE, return qualifier for a general purpose register. */
197 static inline enum aarch64_opnd_qualifier
198 get_greg_qualifier_from_value (aarch64_insn value)
199 {
200 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
201 assert (value <= 0x1
202 && aarch64_get_qualifier_standard_value (qualifier) == value);
203 return qualifier;
204 }
205
206 /* Given VALUE, return qualifier for a vector register. This does not support
207 decoding instructions that accept the 2H vector type. */
208
209 static inline enum aarch64_opnd_qualifier
210 get_vreg_qualifier_from_value (aarch64_insn value)
211 {
212 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
213
214 /* Instructions using vector type 2H should not call this function. Skip over
215 the 2H qualifier. */
216 if (qualifier >= AARCH64_OPND_QLF_V_2H)
217 qualifier += 1;
218
219 assert (value <= 0x8
220 && aarch64_get_qualifier_standard_value (qualifier) == value);
221 return qualifier;
222 }
223
224 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
225 static inline enum aarch64_opnd_qualifier
226 get_sreg_qualifier_from_value (aarch64_insn value)
227 {
228 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
229
230 assert (value <= 0x4
231 && aarch64_get_qualifier_standard_value (qualifier) == value);
232 return qualifier;
233 }
234
235 /* Given the instruction in *INST which is probably half way through the
236 decoding and our caller wants to know the expected qualifier for operand
237 I. Return such a qualifier if we can establish it; otherwise return
238 AARCH64_OPND_QLF_NIL. */
239
240 static aarch64_opnd_qualifier_t
241 get_expected_qualifier (const aarch64_inst *inst, int i)
242 {
243 aarch64_opnd_qualifier_seq_t qualifiers;
244 /* Should not be called if the qualifier is known. */
245 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
246 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
247 i, qualifiers))
248 return qualifiers[i];
249 else
250 return AARCH64_OPND_QLF_NIL;
251 }
252
253 /* Operand extractors. */
254
255 bfd_boolean
256 aarch64_ext_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
257 aarch64_opnd_info *info ATTRIBUTE_UNUSED,
258 const aarch64_insn code ATTRIBUTE_UNUSED,
259 const aarch64_inst *inst ATTRIBUTE_UNUSED,
260 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
261 {
262 return TRUE;
263 }
264
265 bfd_boolean
266 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
267 const aarch64_insn code,
268 const aarch64_inst *inst ATTRIBUTE_UNUSED,
269 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
270 {
271 info->reg.regno = extract_field (self->fields[0], code, 0);
272 return TRUE;
273 }
274
275 bfd_boolean
276 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
277 const aarch64_insn code ATTRIBUTE_UNUSED,
278 const aarch64_inst *inst ATTRIBUTE_UNUSED,
279 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
280 {
281 assert (info->idx == 1
282 || info->idx ==3);
283 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
284 return TRUE;
285 }
286
287 /* e.g. IC <ic_op>{, <Xt>}. */
288 bfd_boolean
289 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
290 const aarch64_insn code,
291 const aarch64_inst *inst ATTRIBUTE_UNUSED,
292 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
293 {
294 info->reg.regno = extract_field (self->fields[0], code, 0);
295 assert (info->idx == 1
296 && (aarch64_get_operand_class (inst->operands[0].type)
297 == AARCH64_OPND_CLASS_SYSTEM));
298 /* This will make the constraint checking happy and more importantly will
299 help the disassembler determine whether this operand is optional or
300 not. */
301 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
302
303 return TRUE;
304 }
305
306 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
307 bfd_boolean
308 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
309 const aarch64_insn code,
310 const aarch64_inst *inst ATTRIBUTE_UNUSED,
311 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
312 {
313 /* regno */
314 info->reglane.regno = extract_field (self->fields[0], code,
315 inst->opcode->mask);
316
317 /* Index and/or type. */
318 if (inst->opcode->iclass == asisdone
319 || inst->opcode->iclass == asimdins)
320 {
321 if (info->type == AARCH64_OPND_En
322 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
323 {
324 unsigned shift;
325 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
326 assert (info->idx == 1); /* Vn */
327 aarch64_insn value = extract_field (FLD_imm4, code, 0);
328 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
329 info->qualifier = get_expected_qualifier (inst, info->idx);
330 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
331 info->reglane.index = value >> shift;
332 }
333 else
334 {
335 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
336 imm5<3:0> <V>
337 0000 RESERVED
338 xxx1 B
339 xx10 H
340 x100 S
341 1000 D */
342 int pos = -1;
343 aarch64_insn value = extract_field (FLD_imm5, code, 0);
344 while (++pos <= 3 && (value & 0x1) == 0)
345 value >>= 1;
346 if (pos > 3)
347 return FALSE;
348 info->qualifier = get_sreg_qualifier_from_value (pos);
349 info->reglane.index = (unsigned) (value >> 1);
350 }
351 }
352 else if (inst->opcode->iclass == dotproduct)
353 {
354 /* Need information in other operand(s) to help decoding. */
355 info->qualifier = get_expected_qualifier (inst, info->idx);
356 switch (info->qualifier)
357 {
358 case AARCH64_OPND_QLF_S_4B:
359 case AARCH64_OPND_QLF_S_2H:
360 /* L:H */
361 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
362 info->reglane.regno &= 0x1f;
363 break;
364 default:
365 return FALSE;
366 }
367 }
368 else if (inst->opcode->iclass == cryptosm3)
369 {
370 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
371 info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
372 }
373 else
374 {
375 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
376 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
377
378 /* Need information in other operand(s) to help decoding. */
379 info->qualifier = get_expected_qualifier (inst, info->idx);
380 switch (info->qualifier)
381 {
382 case AARCH64_OPND_QLF_S_H:
383 if (info->type == AARCH64_OPND_Em16)
384 {
385 /* h:l:m */
386 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
387 FLD_M);
388 info->reglane.regno &= 0xf;
389 }
390 else
391 {
392 /* h:l */
393 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
394 }
395 break;
396 case AARCH64_OPND_QLF_S_S:
397 /* h:l */
398 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
399 break;
400 case AARCH64_OPND_QLF_S_D:
401 /* H */
402 info->reglane.index = extract_field (FLD_H, code, 0);
403 break;
404 default:
405 return FALSE;
406 }
407
408 if (inst->opcode->op == OP_FCMLA_ELEM
409 && info->qualifier != AARCH64_OPND_QLF_S_H)
410 {
411 /* Complex operand takes two elements. */
412 if (info->reglane.index & 1)
413 return FALSE;
414 info->reglane.index /= 2;
415 }
416 }
417
418 return TRUE;
419 }
420
421 bfd_boolean
422 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
423 const aarch64_insn code,
424 const aarch64_inst *inst ATTRIBUTE_UNUSED,
425 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
426 {
427 /* R */
428 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
429 /* len */
430 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
431 return TRUE;
432 }
433
434 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
435 bfd_boolean
436 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
437 aarch64_opnd_info *info, const aarch64_insn code,
438 const aarch64_inst *inst,
439 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
440 {
441 aarch64_insn value;
442 /* Number of elements in each structure to be loaded/stored. */
443 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
444
445 struct
446 {
447 unsigned is_reserved;
448 unsigned num_regs;
449 unsigned num_elements;
450 } data [] =
451 { {0, 4, 4},
452 {1, 4, 4},
453 {0, 4, 1},
454 {0, 4, 2},
455 {0, 3, 3},
456 {1, 3, 3},
457 {0, 3, 1},
458 {0, 1, 1},
459 {0, 2, 2},
460 {1, 2, 2},
461 {0, 2, 1},
462 };
463
464 /* Rt */
465 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
466 /* opcode */
467 value = extract_field (FLD_opcode, code, 0);
468 /* PR 21595: Check for a bogus value. */
469 if (value >= ARRAY_SIZE (data))
470 return FALSE;
471 if (expected_num != data[value].num_elements || data[value].is_reserved)
472 return FALSE;
473 info->reglist.num_regs = data[value].num_regs;
474
475 return TRUE;
476 }
477
478 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
479 lanes instructions. */
480 bfd_boolean
481 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
482 aarch64_opnd_info *info, const aarch64_insn code,
483 const aarch64_inst *inst,
484 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
485 {
486 aarch64_insn value;
487
488 /* Rt */
489 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
490 /* S */
491 value = extract_field (FLD_S, code, 0);
492
493 /* Number of registers is equal to the number of elements in
494 each structure to be loaded/stored. */
495 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
496 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
497
498 /* Except when it is LD1R. */
499 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
500 info->reglist.num_regs = 2;
501
502 return TRUE;
503 }
504
505 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
506 load/store single element instructions. */
507 bfd_boolean
508 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
509 aarch64_opnd_info *info, const aarch64_insn code,
510 const aarch64_inst *inst ATTRIBUTE_UNUSED,
511 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
512 {
513 aarch64_field field = {0, 0};
514 aarch64_insn QSsize; /* fields Q:S:size. */
515 aarch64_insn opcodeh2; /* opcode<2:1> */
516
517 /* Rt */
518 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
519
520 /* Decode the index, opcode<2:1> and size. */
521 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
522 opcodeh2 = extract_field_2 (&field, code, 0);
523 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
524 switch (opcodeh2)
525 {
526 case 0x0:
527 info->qualifier = AARCH64_OPND_QLF_S_B;
528 /* Index encoded in "Q:S:size". */
529 info->reglist.index = QSsize;
530 break;
531 case 0x1:
532 if (QSsize & 0x1)
533 /* UND. */
534 return FALSE;
535 info->qualifier = AARCH64_OPND_QLF_S_H;
536 /* Index encoded in "Q:S:size<1>". */
537 info->reglist.index = QSsize >> 1;
538 break;
539 case 0x2:
540 if ((QSsize >> 1) & 0x1)
541 /* UND. */
542 return FALSE;
543 if ((QSsize & 0x1) == 0)
544 {
545 info->qualifier = AARCH64_OPND_QLF_S_S;
546 /* Index encoded in "Q:S". */
547 info->reglist.index = QSsize >> 2;
548 }
549 else
550 {
551 if (extract_field (FLD_S, code, 0))
552 /* UND */
553 return FALSE;
554 info->qualifier = AARCH64_OPND_QLF_S_D;
555 /* Index encoded in "Q". */
556 info->reglist.index = QSsize >> 3;
557 }
558 break;
559 default:
560 return FALSE;
561 }
562
563 info->reglist.has_index = 1;
564 info->reglist.num_regs = 0;
565 /* Number of registers is equal to the number of elements in
566 each structure to be loaded/stored. */
567 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
568 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
569
570 return TRUE;
571 }
572
573 /* Decode fields immh:immb and/or Q for e.g.
574 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
575 or SSHR <V><d>, <V><n>, #<shift>. */
576
577 bfd_boolean
578 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
579 aarch64_opnd_info *info, const aarch64_insn code,
580 const aarch64_inst *inst,
581 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
582 {
583 int pos;
584 aarch64_insn Q, imm, immh;
585 enum aarch64_insn_class iclass = inst->opcode->iclass;
586
587 immh = extract_field (FLD_immh, code, 0);
588 if (immh == 0)
589 return FALSE;
590 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
591 pos = 4;
592 /* Get highest set bit in immh. */
593 while (--pos >= 0 && (immh & 0x8) == 0)
594 immh <<= 1;
595
596 assert ((iclass == asimdshf || iclass == asisdshf)
597 && (info->type == AARCH64_OPND_IMM_VLSR
598 || info->type == AARCH64_OPND_IMM_VLSL));
599
600 if (iclass == asimdshf)
601 {
602 Q = extract_field (FLD_Q, code, 0);
603 /* immh Q <T>
604 0000 x SEE AdvSIMD modified immediate
605 0001 0 8B
606 0001 1 16B
607 001x 0 4H
608 001x 1 8H
609 01xx 0 2S
610 01xx 1 4S
611 1xxx 0 RESERVED
612 1xxx 1 2D */
613 info->qualifier =
614 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
615 }
616 else
617 info->qualifier = get_sreg_qualifier_from_value (pos);
618
619 if (info->type == AARCH64_OPND_IMM_VLSR)
620 /* immh <shift>
621 0000 SEE AdvSIMD modified immediate
622 0001 (16-UInt(immh:immb))
623 001x (32-UInt(immh:immb))
624 01xx (64-UInt(immh:immb))
625 1xxx (128-UInt(immh:immb)) */
626 info->imm.value = (16 << pos) - imm;
627 else
628 /* immh:immb
629 immh <shift>
630 0000 SEE AdvSIMD modified immediate
631 0001 (UInt(immh:immb)-8)
632 001x (UInt(immh:immb)-16)
633 01xx (UInt(immh:immb)-32)
634 1xxx (UInt(immh:immb)-64) */
635 info->imm.value = imm - (8 << pos);
636
637 return TRUE;
638 }
639
640 /* Decode shift immediate for e.g. sshr (imm). */
641 bfd_boolean
642 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
643 aarch64_opnd_info *info, const aarch64_insn code,
644 const aarch64_inst *inst ATTRIBUTE_UNUSED,
645 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
646 {
647 int64_t imm;
648 aarch64_insn val;
649 val = extract_field (FLD_size, code, 0);
650 switch (val)
651 {
652 case 0: imm = 8; break;
653 case 1: imm = 16; break;
654 case 2: imm = 32; break;
655 default: return FALSE;
656 }
657 info->imm.value = imm;
658 return TRUE;
659 }
660
661 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
662 value in the field(s) will be extracted as unsigned immediate value. */
663 bfd_boolean
664 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
665 const aarch64_insn code,
666 const aarch64_inst *inst ATTRIBUTE_UNUSED,
667 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
668 {
669 uint64_t imm;
670
671 imm = extract_all_fields (self, code);
672
673 if (operand_need_sign_extension (self))
674 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
675
676 if (operand_need_shift_by_two (self))
677 imm <<= 2;
678 else if (operand_need_shift_by_four (self))
679 imm <<= 4;
680
681 if (info->type == AARCH64_OPND_ADDR_ADRP)
682 imm <<= 12;
683
684 info->imm.value = imm;
685 return TRUE;
686 }
687
688 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
689 bfd_boolean
690 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
691 const aarch64_insn code,
692 const aarch64_inst *inst ATTRIBUTE_UNUSED,
693 aarch64_operand_error *errors)
694 {
695 aarch64_ext_imm (self, info, code, inst, errors);
696 info->shifter.kind = AARCH64_MOD_LSL;
697 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
698 return TRUE;
699 }
700
701 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
702 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
703 bfd_boolean
704 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
705 aarch64_opnd_info *info,
706 const aarch64_insn code,
707 const aarch64_inst *inst ATTRIBUTE_UNUSED,
708 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
709 {
710 uint64_t imm;
711 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
712 aarch64_field field = {0, 0};
713
714 assert (info->idx == 1);
715
716 if (info->type == AARCH64_OPND_SIMD_FPIMM)
717 info->imm.is_fp = 1;
718
719 /* a:b:c:d:e:f:g:h */
720 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
721 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
722 {
723 /* Either MOVI <Dd>, #<imm>
724 or MOVI <Vd>.2D, #<imm>.
725 <imm> is a 64-bit immediate
726 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
727 encoded in "a:b:c:d:e:f:g:h". */
728 int i;
729 unsigned abcdefgh = imm;
730 for (imm = 0ull, i = 0; i < 8; i++)
731 if (((abcdefgh >> i) & 0x1) != 0)
732 imm |= 0xffull << (8 * i);
733 }
734 info->imm.value = imm;
735
736 /* cmode */
737 info->qualifier = get_expected_qualifier (inst, info->idx);
738 switch (info->qualifier)
739 {
740 case AARCH64_OPND_QLF_NIL:
741 /* no shift */
742 info->shifter.kind = AARCH64_MOD_NONE;
743 return 1;
744 case AARCH64_OPND_QLF_LSL:
745 /* shift zeros */
746 info->shifter.kind = AARCH64_MOD_LSL;
747 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
748 {
749 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
750 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
751 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
752 default: assert (0); return FALSE;
753 }
754 /* 00: 0; 01: 8; 10:16; 11:24. */
755 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
756 break;
757 case AARCH64_OPND_QLF_MSL:
758 /* shift ones */
759 info->shifter.kind = AARCH64_MOD_MSL;
760 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
761 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
762 break;
763 default:
764 assert (0);
765 return FALSE;
766 }
767
768 return TRUE;
769 }
770
771 /* Decode an 8-bit floating-point immediate. */
772 bfd_boolean
773 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
774 const aarch64_insn code,
775 const aarch64_inst *inst ATTRIBUTE_UNUSED,
776 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
777 {
778 info->imm.value = extract_all_fields (self, code);
779 info->imm.is_fp = 1;
780 return TRUE;
781 }
782
783 /* Decode a 1-bit rotate immediate (#90 or #270). */
784 bfd_boolean
785 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
786 const aarch64_insn code,
787 const aarch64_inst *inst ATTRIBUTE_UNUSED,
788 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
789 {
790 uint64_t rot = extract_field (self->fields[0], code, 0);
791 assert (rot < 2U);
792 info->imm.value = rot * 180 + 90;
793 return TRUE;
794 }
795
796 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
797 bfd_boolean
798 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
799 const aarch64_insn code,
800 const aarch64_inst *inst ATTRIBUTE_UNUSED,
801 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
802 {
803 uint64_t rot = extract_field (self->fields[0], code, 0);
804 assert (rot < 4U);
805 info->imm.value = rot * 90;
806 return TRUE;
807 }
808
809 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
810 bfd_boolean
811 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
812 aarch64_opnd_info *info, const aarch64_insn code,
813 const aarch64_inst *inst ATTRIBUTE_UNUSED,
814 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
815 {
816 info->imm.value = 64- extract_field (FLD_scale, code, 0);
817 return TRUE;
818 }
819
820 /* Decode arithmetic immediate for e.g.
821 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
822 bfd_boolean
823 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
824 aarch64_opnd_info *info, const aarch64_insn code,
825 const aarch64_inst *inst ATTRIBUTE_UNUSED,
826 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
827 {
828 aarch64_insn value;
829
830 info->shifter.kind = AARCH64_MOD_LSL;
831 /* shift */
832 value = extract_field (FLD_shift, code, 0);
833 if (value >= 2)
834 return FALSE;
835 info->shifter.amount = value ? 12 : 0;
836 /* imm12 (unsigned) */
837 info->imm.value = extract_field (FLD_imm12, code, 0);
838
839 return TRUE;
840 }
841
842 /* Return true if VALUE is a valid logical immediate encoding, storing the
843 decoded value in *RESULT if so. ESIZE is the number of bytes in the
844 decoded immediate. */
845 static bfd_boolean
846 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
847 {
848 uint64_t imm, mask;
849 uint32_t N, R, S;
850 unsigned simd_size;
851
852 /* value is N:immr:imms. */
853 S = value & 0x3f;
854 R = (value >> 6) & 0x3f;
855 N = (value >> 12) & 0x1;
856
857 /* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
858 (in other words, right rotated by R), then replicated. */
859 if (N != 0)
860 {
861 simd_size = 64;
862 mask = 0xffffffffffffffffull;
863 }
864 else
865 {
866 switch (S)
867 {
868 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
869 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
870 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
871 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
872 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
873 default: return FALSE;
874 }
875 mask = (1ull << simd_size) - 1;
876 /* Top bits are IGNORED. */
877 R &= simd_size - 1;
878 }
879
880 if (simd_size > esize * 8)
881 return FALSE;
882
883 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
884 if (S == simd_size - 1)
885 return FALSE;
886 /* S+1 consecutive bits to 1. */
887 /* NOTE: S can't be 63 due to detection above. */
888 imm = (1ull << (S + 1)) - 1;
889 /* Rotate to the left by simd_size - R. */
890 if (R != 0)
891 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
892 /* Replicate the value according to SIMD size. */
893 switch (simd_size)
894 {
895 case 2: imm = (imm << 2) | imm;
896 /* Fall through. */
897 case 4: imm = (imm << 4) | imm;
898 /* Fall through. */
899 case 8: imm = (imm << 8) | imm;
900 /* Fall through. */
901 case 16: imm = (imm << 16) | imm;
902 /* Fall through. */
903 case 32: imm = (imm << 32) | imm;
904 /* Fall through. */
905 case 64: break;
906 default: assert (0); return 0;
907 }
908
909 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
910
911 return TRUE;
912 }
913
914 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
915 bfd_boolean
916 aarch64_ext_limm (const aarch64_operand *self,
917 aarch64_opnd_info *info, const aarch64_insn code,
918 const aarch64_inst *inst,
919 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
920 {
921 uint32_t esize;
922 aarch64_insn value;
923
924 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
925 self->fields[2]);
926 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
927 return decode_limm (esize, value, &info->imm.value);
928 }
929
930 /* Decode a logical immediate for the BIC alias of AND (etc.). */
931 bfd_boolean
932 aarch64_ext_inv_limm (const aarch64_operand *self,
933 aarch64_opnd_info *info, const aarch64_insn code,
934 const aarch64_inst *inst,
935 aarch64_operand_error *errors)
936 {
937 if (!aarch64_ext_limm (self, info, code, inst, errors))
938 return FALSE;
939 info->imm.value = ~info->imm.value;
940 return TRUE;
941 }
942
943 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
944 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
945 bfd_boolean
946 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
947 aarch64_opnd_info *info,
948 const aarch64_insn code, const aarch64_inst *inst,
949 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
950 {
951 aarch64_insn value;
952
953 /* Rt */
954 info->reg.regno = extract_field (FLD_Rt, code, 0);
955
956 /* size */
957 value = extract_field (FLD_ldst_size, code, 0);
958 if (inst->opcode->iclass == ldstpair_indexed
959 || inst->opcode->iclass == ldstnapair_offs
960 || inst->opcode->iclass == ldstpair_off
961 || inst->opcode->iclass == loadlit)
962 {
963 enum aarch64_opnd_qualifier qualifier;
964 switch (value)
965 {
966 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
967 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
968 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
969 default: return FALSE;
970 }
971 info->qualifier = qualifier;
972 }
973 else
974 {
975 /* opc1:size */
976 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
977 if (value > 0x4)
978 return FALSE;
979 info->qualifier = get_sreg_qualifier_from_value (value);
980 }
981
982 return TRUE;
983 }
984
985 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
986 bfd_boolean
987 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
988 aarch64_opnd_info *info,
989 aarch64_insn code,
990 const aarch64_inst *inst ATTRIBUTE_UNUSED,
991 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
992 {
993 /* Rn */
994 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
995 return TRUE;
996 }
997
998 /* Decode the address operand for e.g.
999 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
1000 bfd_boolean
1001 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1002 aarch64_opnd_info *info,
1003 aarch64_insn code, const aarch64_inst *inst,
1004 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1005 {
1006 info->qualifier = get_expected_qualifier (inst, info->idx);
1007
1008 /* Rn */
1009 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1010
1011 /* simm9 */
1012 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1013 info->addr.offset.imm = sign_extend (imm, 8);
1014 if (extract_field (self->fields[2], code, 0) == 1) {
1015 info->addr.writeback = 1;
1016 info->addr.preind = 1;
1017 }
1018 return TRUE;
1019 }
1020
1021 /* Decode the address operand for e.g.
1022 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1023 bfd_boolean
1024 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
1025 aarch64_opnd_info *info,
1026 aarch64_insn code, const aarch64_inst *inst,
1027 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1028 {
1029 aarch64_insn S, value;
1030
1031 /* Rn */
1032 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1033 /* Rm */
1034 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1035 /* option */
1036 value = extract_field (FLD_option, code, 0);
1037 info->shifter.kind =
1038 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1039 /* Fix-up the shifter kind; although the table-driven approach is
1040 efficient, it is slightly inflexible, thus needing this fix-up. */
1041 if (info->shifter.kind == AARCH64_MOD_UXTX)
1042 info->shifter.kind = AARCH64_MOD_LSL;
1043 /* S */
1044 S = extract_field (FLD_S, code, 0);
1045 if (S == 0)
1046 {
1047 info->shifter.amount = 0;
1048 info->shifter.amount_present = 0;
1049 }
1050 else
1051 {
1052 int size;
1053 /* Need information in other operand(s) to help achieve the decoding
1054 from 'S' field. */
1055 info->qualifier = get_expected_qualifier (inst, info->idx);
1056 /* Get the size of the data element that is accessed, which may be
1057 different from that of the source register size, e.g. in strb/ldrb. */
1058 size = aarch64_get_qualifier_esize (info->qualifier);
1059 info->shifter.amount = get_logsz (size);
1060 info->shifter.amount_present = 1;
1061 }
1062
1063 return TRUE;
1064 }
1065
1066 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
1067 bfd_boolean
1068 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1069 aarch64_insn code, const aarch64_inst *inst,
1070 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1071 {
1072 aarch64_insn imm;
1073 info->qualifier = get_expected_qualifier (inst, info->idx);
1074
1075 /* Rn */
1076 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1077 /* simm (imm9 or imm7) */
1078 imm = extract_field (self->fields[0], code, 0);
1079 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1080 if (self->fields[0] == FLD_imm7
1081 || info->qualifier == AARCH64_OPND_QLF_imm_tag)
1082 /* scaled immediate in ld/st pair instructions. */
1083 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1084 /* qualifier */
1085 if (inst->opcode->iclass == ldst_unscaled
1086 || inst->opcode->iclass == ldstnapair_offs
1087 || inst->opcode->iclass == ldstpair_off
1088 || inst->opcode->iclass == ldst_unpriv)
1089 info->addr.writeback = 0;
1090 else
1091 {
1092 /* pre/post- index */
1093 info->addr.writeback = 1;
1094 if (extract_field (self->fields[1], code, 0) == 1)
1095 info->addr.preind = 1;
1096 else
1097 info->addr.postind = 1;
1098 }
1099
1100 return TRUE;
1101 }
1102
1103 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1104 bfd_boolean
1105 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1106 aarch64_insn code,
1107 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1108 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1109 {
1110 int shift;
1111 info->qualifier = get_expected_qualifier (inst, info->idx);
1112 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1113 /* Rn */
1114 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1115 /* uimm12 */
1116 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1117 return TRUE;
1118 }
1119
1120 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1121 bfd_boolean
1122 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1123 aarch64_insn code,
1124 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1125 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1126 {
1127 aarch64_insn imm;
1128
1129 info->qualifier = get_expected_qualifier (inst, info->idx);
1130 /* Rn */
1131 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1132 /* simm10 */
1133 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1134 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1135 if (extract_field (self->fields[3], code, 0) == 1) {
1136 info->addr.writeback = 1;
1137 info->addr.preind = 1;
1138 }
1139 return TRUE;
1140 }
1141
1142 /* Decode the address operand for e.g.
1143 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1144 bfd_boolean
1145 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1146 aarch64_opnd_info *info,
1147 aarch64_insn code, const aarch64_inst *inst,
1148 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1149 {
1150 /* The opcode dependent area stores the number of elements in
1151 each structure to be loaded/stored. */
1152 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1153
1154 /* Rn */
1155 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1156 /* Rm | #<amount> */
1157 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1158 if (info->addr.offset.regno == 31)
1159 {
1160 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1161 /* Special handling of loading single structure to all lane. */
1162 info->addr.offset.imm = (is_ld1r ? 1
1163 : inst->operands[0].reglist.num_regs)
1164 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1165 else
1166 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1167 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1168 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1169 }
1170 else
1171 info->addr.offset.is_reg = 1;
1172 info->addr.writeback = 1;
1173
1174 return TRUE;
1175 }
1176
1177 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1178 bfd_boolean
1179 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1180 aarch64_opnd_info *info,
1181 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1182 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1183 {
1184 aarch64_insn value;
1185 /* cond */
1186 value = extract_field (FLD_cond, code, 0);
1187 info->cond = get_cond_from_value (value);
1188 return TRUE;
1189 }
1190
1191 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1192 bfd_boolean
1193 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1194 aarch64_opnd_info *info,
1195 aarch64_insn code,
1196 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1197 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1198 {
1199 /* op0:op1:CRn:CRm:op2 */
1200 info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1201 FLD_CRm, FLD_op2);
1202 info->sysreg.flags = 0;
1203
1204 /* If a system instruction, check which restrictions should be on the register
1205 value during decoding, these will be enforced then. */
1206 if (inst->opcode->iclass == ic_system)
1207 {
1208 /* Check to see if it's read-only, else check if it's write only.
1209 if it's both or unspecified don't care. */
1210 if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1211 info->sysreg.flags = F_REG_READ;
1212 else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1213 == F_SYS_WRITE)
1214 info->sysreg.flags = F_REG_WRITE;
1215 }
1216
1217 return TRUE;
1218 }
1219
1220 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1221 bfd_boolean
1222 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1223 aarch64_opnd_info *info, aarch64_insn code,
1224 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1225 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1226 {
1227 int i;
1228 /* op1:op2 */
1229 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1230 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1231 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1232 return TRUE;
1233 /* Reserved value in <pstatefield>. */
1234 return FALSE;
1235 }
1236
1237 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1238 bfd_boolean
1239 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1240 aarch64_opnd_info *info,
1241 aarch64_insn code,
1242 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1243 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1244 {
1245 int i;
1246 aarch64_insn value;
1247 const aarch64_sys_ins_reg *sysins_ops;
1248 /* op0:op1:CRn:CRm:op2 */
1249 value = extract_fields (code, 0, 5,
1250 FLD_op0, FLD_op1, FLD_CRn,
1251 FLD_CRm, FLD_op2);
1252
1253 switch (info->type)
1254 {
1255 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1256 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1257 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1258 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1259 case AARCH64_OPND_SYSREG_SR:
1260 sysins_ops = aarch64_sys_regs_sr;
1261 /* Let's remove op2 for rctx. Refer to comments in the definition of
1262 aarch64_sys_regs_sr[]. */
1263 value = value & ~(0x7);
1264 break;
1265 default: assert (0); return FALSE;
1266 }
1267
1268 for (i = 0; sysins_ops[i].name != NULL; ++i)
1269 if (sysins_ops[i].value == value)
1270 {
1271 info->sysins_op = sysins_ops + i;
1272 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1273 info->sysins_op->name,
1274 (unsigned)info->sysins_op->value,
1275 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1276 return TRUE;
1277 }
1278
1279 return FALSE;
1280 }
1281
1282 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1283
1284 bfd_boolean
1285 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1286 aarch64_opnd_info *info,
1287 aarch64_insn code,
1288 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1289 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1290 {
1291 /* CRm */
1292 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1293 return TRUE;
1294 }
1295
1296 /* Decode the prefetch operation option operand for e.g.
1297 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1298
1299 bfd_boolean
1300 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1301 aarch64_opnd_info *info,
1302 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1303 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1304 {
1305 /* prfop in Rt */
1306 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1307 return TRUE;
1308 }
1309
1310 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1311 to the matching name/value pair in aarch64_hint_options. */
1312
1313 bfd_boolean
1314 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1315 aarch64_opnd_info *info,
1316 aarch64_insn code,
1317 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1318 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1319 {
1320 /* CRm:op2. */
1321 unsigned hint_number;
1322 int i;
1323
1324 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1325
1326 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1327 {
1328 if (hint_number == HINT_VAL (aarch64_hint_options[i].value))
1329 {
1330 info->hint_option = &(aarch64_hint_options[i]);
1331 return TRUE;
1332 }
1333 }
1334
1335 return FALSE;
1336 }
1337
1338 /* Decode the extended register operand for e.g.
1339 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1340 bfd_boolean
1341 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1342 aarch64_opnd_info *info,
1343 aarch64_insn code,
1344 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1345 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1346 {
1347 aarch64_insn value;
1348
1349 /* Rm */
1350 info->reg.regno = extract_field (FLD_Rm, code, 0);
1351 /* option */
1352 value = extract_field (FLD_option, code, 0);
1353 info->shifter.kind =
1354 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1355 /* imm3 */
1356 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1357
1358 /* This makes the constraint checking happy. */
1359 info->shifter.operator_present = 1;
1360
1361 /* Assume inst->operands[0].qualifier has been resolved. */
1362 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1363 info->qualifier = AARCH64_OPND_QLF_W;
1364 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1365 && (info->shifter.kind == AARCH64_MOD_UXTX
1366 || info->shifter.kind == AARCH64_MOD_SXTX))
1367 info->qualifier = AARCH64_OPND_QLF_X;
1368
1369 return TRUE;
1370 }
1371
1372 /* Decode the shifted register operand for e.g.
1373 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1374 bfd_boolean
1375 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1376 aarch64_opnd_info *info,
1377 aarch64_insn code,
1378 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1379 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1380 {
1381 aarch64_insn value;
1382
1383 /* Rm */
1384 info->reg.regno = extract_field (FLD_Rm, code, 0);
1385 /* shift */
1386 value = extract_field (FLD_shift, code, 0);
1387 info->shifter.kind =
1388 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1389 if (info->shifter.kind == AARCH64_MOD_ROR
1390 && inst->opcode->iclass != log_shift)
1391 /* ROR is not available for the shifted register operand in arithmetic
1392 instructions. */
1393 return FALSE;
1394 /* imm6 */
1395 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1396
1397 /* This makes the constraint checking happy. */
1398 info->shifter.operator_present = 1;
1399
1400 return TRUE;
1401 }
1402
1403 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1404 where <offset> is given by the OFFSET parameter and where <factor> is
1405 1 plus SELF's operand-dependent value. fields[0] specifies the field
1406 that holds <base>. */
1407 static bfd_boolean
1408 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1409 aarch64_opnd_info *info, aarch64_insn code,
1410 int64_t offset)
1411 {
1412 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1413 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1414 info->addr.offset.is_reg = FALSE;
1415 info->addr.writeback = FALSE;
1416 info->addr.preind = TRUE;
1417 if (offset != 0)
1418 info->shifter.kind = AARCH64_MOD_MUL_VL;
1419 info->shifter.amount = 1;
1420 info->shifter.operator_present = (info->addr.offset.imm != 0);
1421 info->shifter.amount_present = FALSE;
1422 return TRUE;
1423 }
1424
1425 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1426 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1427 SELF's operand-dependent value. fields[0] specifies the field that
1428 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1429 bfd_boolean
1430 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1431 aarch64_opnd_info *info, aarch64_insn code,
1432 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1433 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1434 {
1435 int offset;
1436
1437 offset = extract_field (FLD_SVE_imm4, code, 0);
1438 offset = ((offset + 8) & 15) - 8;
1439 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1440 }
1441
1442 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1443 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1444 SELF's operand-dependent value. fields[0] specifies the field that
1445 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1446 bfd_boolean
1447 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1448 aarch64_opnd_info *info, aarch64_insn code,
1449 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1450 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1451 {
1452 int offset;
1453
1454 offset = extract_field (FLD_SVE_imm6, code, 0);
1455 offset = (((offset + 32) & 63) - 32);
1456 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1457 }
1458
1459 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1460 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1461 SELF's operand-dependent value. fields[0] specifies the field that
1462 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1463 and imm3 fields, with imm3 being the less-significant part. */
1464 bfd_boolean
1465 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1466 aarch64_opnd_info *info,
1467 aarch64_insn code,
1468 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1469 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1470 {
1471 int offset;
1472
1473 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1474 offset = (((offset + 256) & 511) - 256);
1475 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1476 }
1477
1478 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1479 is given by the OFFSET parameter and where <shift> is SELF's operand-
1480 dependent value. fields[0] specifies the base register field <base>. */
1481 static bfd_boolean
1482 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1483 aarch64_opnd_info *info, aarch64_insn code,
1484 int64_t offset)
1485 {
1486 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1487 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1488 info->addr.offset.is_reg = FALSE;
1489 info->addr.writeback = FALSE;
1490 info->addr.preind = TRUE;
1491 info->shifter.operator_present = FALSE;
1492 info->shifter.amount_present = FALSE;
1493 return TRUE;
1494 }
1495
1496 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1497 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1498 value. fields[0] specifies the base register field. */
1499 bfd_boolean
1500 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1501 aarch64_opnd_info *info, aarch64_insn code,
1502 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1503 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1504 {
1505 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1506 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1507 }
1508
1509 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1510 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1511 value. fields[0] specifies the base register field. */
1512 bfd_boolean
1513 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1514 aarch64_opnd_info *info, aarch64_insn code,
1515 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1516 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1517 {
1518 int offset = extract_field (FLD_SVE_imm6, code, 0);
1519 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1520 }
1521
1522 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1523 is SELF's operand-dependent value. fields[0] specifies the base
1524 register field and fields[1] specifies the offset register field. */
1525 bfd_boolean
1526 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1527 aarch64_opnd_info *info, aarch64_insn code,
1528 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1529 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1530 {
1531 int index_regno;
1532
1533 index_regno = extract_field (self->fields[1], code, 0);
1534 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1535 return FALSE;
1536
1537 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1538 info->addr.offset.regno = index_regno;
1539 info->addr.offset.is_reg = TRUE;
1540 info->addr.writeback = FALSE;
1541 info->addr.preind = TRUE;
1542 info->shifter.kind = AARCH64_MOD_LSL;
1543 info->shifter.amount = get_operand_specific_data (self);
1544 info->shifter.operator_present = (info->shifter.amount != 0);
1545 info->shifter.amount_present = (info->shifter.amount != 0);
1546 return TRUE;
1547 }
1548
1549 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1550 <shift> is SELF's operand-dependent value. fields[0] specifies the
1551 base register field, fields[1] specifies the offset register field and
1552 fields[2] is a single-bit field that selects SXTW over UXTW. */
1553 bfd_boolean
1554 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1555 aarch64_opnd_info *info, aarch64_insn code,
1556 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1557 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1558 {
1559 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1560 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1561 info->addr.offset.is_reg = TRUE;
1562 info->addr.writeback = FALSE;
1563 info->addr.preind = TRUE;
1564 if (extract_field (self->fields[2], code, 0))
1565 info->shifter.kind = AARCH64_MOD_SXTW;
1566 else
1567 info->shifter.kind = AARCH64_MOD_UXTW;
1568 info->shifter.amount = get_operand_specific_data (self);
1569 info->shifter.operator_present = TRUE;
1570 info->shifter.amount_present = (info->shifter.amount != 0);
1571 return TRUE;
1572 }
1573
1574 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1575 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1576 fields[0] specifies the base register field. */
1577 bfd_boolean
1578 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1579 aarch64_opnd_info *info, aarch64_insn code,
1580 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1581 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1582 {
1583 int offset = extract_field (FLD_imm5, code, 0);
1584 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1585 }
1586
1587 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1588 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1589 number. fields[0] specifies the base register field and fields[1]
1590 specifies the offset register field. */
1591 static bfd_boolean
1592 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1593 aarch64_insn code, enum aarch64_modifier_kind kind)
1594 {
1595 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1596 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1597 info->addr.offset.is_reg = TRUE;
1598 info->addr.writeback = FALSE;
1599 info->addr.preind = TRUE;
1600 info->shifter.kind = kind;
1601 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1602 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1603 || info->shifter.amount != 0);
1604 info->shifter.amount_present = (info->shifter.amount != 0);
1605 return TRUE;
1606 }
1607
1608 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1609 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1610 field and fields[1] specifies the offset register field. */
1611 bfd_boolean
1612 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1613 aarch64_opnd_info *info, aarch64_insn code,
1614 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1615 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1616 {
1617 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1618 }
1619
1620 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1621 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1622 field and fields[1] specifies the offset register field. */
1623 bfd_boolean
1624 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1625 aarch64_opnd_info *info, aarch64_insn code,
1626 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1627 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1628 {
1629 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1630 }
1631
1632 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1633 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1634 field and fields[1] specifies the offset register field. */
1635 bfd_boolean
1636 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1637 aarch64_opnd_info *info, aarch64_insn code,
1638 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1639 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1640 {
1641 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1642 }
1643
1644 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1645 has the raw field value and that the low 8 bits decode to VALUE. */
1646 static bfd_boolean
1647 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1648 {
1649 info->shifter.kind = AARCH64_MOD_LSL;
1650 info->shifter.amount = 0;
1651 if (info->imm.value & 0x100)
1652 {
1653 if (value == 0)
1654 /* Decode 0x100 as #0, LSL #8. */
1655 info->shifter.amount = 8;
1656 else
1657 value *= 256;
1658 }
1659 info->shifter.operator_present = (info->shifter.amount != 0);
1660 info->shifter.amount_present = (info->shifter.amount != 0);
1661 info->imm.value = value;
1662 return TRUE;
1663 }
1664
1665 /* Decode an SVE ADD/SUB immediate. */
1666 bfd_boolean
1667 aarch64_ext_sve_aimm (const aarch64_operand *self,
1668 aarch64_opnd_info *info, const aarch64_insn code,
1669 const aarch64_inst *inst,
1670 aarch64_operand_error *errors)
1671 {
1672 return (aarch64_ext_imm (self, info, code, inst, errors)
1673 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1674 }
1675
1676 /* Decode an SVE CPY/DUP immediate. */
1677 bfd_boolean
1678 aarch64_ext_sve_asimm (const aarch64_operand *self,
1679 aarch64_opnd_info *info, const aarch64_insn code,
1680 const aarch64_inst *inst,
1681 aarch64_operand_error *errors)
1682 {
1683 return (aarch64_ext_imm (self, info, code, inst, errors)
1684 && decode_sve_aimm (info, (int8_t) info->imm.value));
1685 }
1686
1687 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1688 The fields array specifies which field to use. */
1689 bfd_boolean
1690 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1691 aarch64_opnd_info *info, aarch64_insn code,
1692 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1693 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1694 {
1695 if (extract_field (self->fields[0], code, 0))
1696 info->imm.value = 0x3f800000;
1697 else
1698 info->imm.value = 0x3f000000;
1699 info->imm.is_fp = TRUE;
1700 return TRUE;
1701 }
1702
1703 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1704 The fields array specifies which field to use. */
1705 bfd_boolean
1706 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1707 aarch64_opnd_info *info, aarch64_insn code,
1708 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1709 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1710 {
1711 if (extract_field (self->fields[0], code, 0))
1712 info->imm.value = 0x40000000;
1713 else
1714 info->imm.value = 0x3f000000;
1715 info->imm.is_fp = TRUE;
1716 return TRUE;
1717 }
1718
1719 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1720 The fields array specifies which field to use. */
1721 bfd_boolean
1722 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1723 aarch64_opnd_info *info, aarch64_insn code,
1724 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1725 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1726 {
1727 if (extract_field (self->fields[0], code, 0))
1728 info->imm.value = 0x3f800000;
1729 else
1730 info->imm.value = 0x0;
1731 info->imm.is_fp = TRUE;
1732 return TRUE;
1733 }
1734
1735 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1736 array specifies which field to use for Zn. MM is encoded in the
1737 concatenation of imm5 and SVE_tszh, with imm5 being the less
1738 significant part. */
1739 bfd_boolean
1740 aarch64_ext_sve_index (const aarch64_operand *self,
1741 aarch64_opnd_info *info, aarch64_insn code,
1742 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1743 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1744 {
1745 int val;
1746
1747 info->reglane.regno = extract_field (self->fields[0], code, 0);
1748 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1749 if ((val & 31) == 0)
1750 return 0;
1751 while ((val & 1) == 0)
1752 val /= 2;
1753 info->reglane.index = val / 2;
1754 return TRUE;
1755 }
1756
1757 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1758 bfd_boolean
1759 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1760 aarch64_opnd_info *info, const aarch64_insn code,
1761 const aarch64_inst *inst,
1762 aarch64_operand_error *errors)
1763 {
1764 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1765 return (aarch64_ext_limm (self, info, code, inst, errors)
1766 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1767 }
1768
1769 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1770 and where MM occupies the most-significant part. The operand-dependent
1771 value specifies the number of bits in Zn. */
1772 bfd_boolean
1773 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1774 aarch64_opnd_info *info, aarch64_insn code,
1775 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1776 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1777 {
1778 unsigned int reg_bits = get_operand_specific_data (self);
1779 unsigned int val = extract_all_fields (self, code);
1780 info->reglane.regno = val & ((1 << reg_bits) - 1);
1781 info->reglane.index = val >> reg_bits;
1782 return TRUE;
1783 }
1784
1785 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1786 to use for Zn. The opcode-dependent value specifies the number
1787 of registers in the list. */
1788 bfd_boolean
1789 aarch64_ext_sve_reglist (const aarch64_operand *self,
1790 aarch64_opnd_info *info, aarch64_insn code,
1791 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1792 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1793 {
1794 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1795 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1796 return TRUE;
1797 }
1798
1799 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1800 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1801 field. */
1802 bfd_boolean
1803 aarch64_ext_sve_scale (const aarch64_operand *self,
1804 aarch64_opnd_info *info, aarch64_insn code,
1805 const aarch64_inst *inst, aarch64_operand_error *errors)
1806 {
1807 int val;
1808
1809 if (!aarch64_ext_imm (self, info, code, inst, errors))
1810 return FALSE;
1811 val = extract_field (FLD_SVE_imm4, code, 0);
1812 info->shifter.kind = AARCH64_MOD_MUL;
1813 info->shifter.amount = val + 1;
1814 info->shifter.operator_present = (val != 0);
1815 info->shifter.amount_present = (val != 0);
1816 return TRUE;
1817 }
1818
1819 /* Return the top set bit in VALUE, which is expected to be relatively
1820 small. */
1821 static uint64_t
1822 get_top_bit (uint64_t value)
1823 {
1824 while ((value & -value) != value)
1825 value -= value & -value;
1826 return value;
1827 }
1828
1829 /* Decode an SVE shift-left immediate. */
1830 bfd_boolean
1831 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1832 aarch64_opnd_info *info, const aarch64_insn code,
1833 const aarch64_inst *inst, aarch64_operand_error *errors)
1834 {
1835 if (!aarch64_ext_imm (self, info, code, inst, errors)
1836 || info->imm.value == 0)
1837 return FALSE;
1838
1839 info->imm.value -= get_top_bit (info->imm.value);
1840 return TRUE;
1841 }
1842
1843 /* Decode an SVE shift-right immediate. */
1844 bfd_boolean
1845 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1846 aarch64_opnd_info *info, const aarch64_insn code,
1847 const aarch64_inst *inst, aarch64_operand_error *errors)
1848 {
1849 if (!aarch64_ext_imm (self, info, code, inst, errors)
1850 || info->imm.value == 0)
1851 return FALSE;
1852
1853 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1854 return TRUE;
1855 }
1856 \f
1857 /* Bitfields that are commonly used to encode certain operands' information
1858 may be partially used as part of the base opcode in some instructions.
1859 For example, the bit 1 of the field 'size' in
1860 FCVTXN <Vb><d>, <Va><n>
1861 is actually part of the base opcode, while only size<0> is available
1862 for encoding the register type. Another example is the AdvSIMD
1863 instruction ORR (register), in which the field 'size' is also used for
1864 the base opcode, leaving only the field 'Q' available to encode the
1865 vector register arrangement specifier '8B' or '16B'.
1866
1867 This function tries to deduce the qualifier from the value of partially
1868 constrained field(s). Given the VALUE of such a field or fields, the
1869 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1870 operand encoding), the function returns the matching qualifier or
1871 AARCH64_OPND_QLF_NIL if nothing matches.
1872
1873 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1874 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1875 may end with AARCH64_OPND_QLF_NIL. */
1876
1877 static enum aarch64_opnd_qualifier
1878 get_qualifier_from_partial_encoding (aarch64_insn value,
1879 const enum aarch64_opnd_qualifier* \
1880 candidates,
1881 aarch64_insn mask)
1882 {
1883 int i;
1884 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1885 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1886 {
1887 aarch64_insn standard_value;
1888 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1889 break;
1890 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1891 if ((standard_value & mask) == (value & mask))
1892 return candidates[i];
1893 }
1894 return AARCH64_OPND_QLF_NIL;
1895 }
1896
1897 /* Given a list of qualifier sequences, return all possible valid qualifiers
1898 for operand IDX in QUALIFIERS.
1899 Assume QUALIFIERS is an array whose length is large enough. */
1900
1901 static void
1902 get_operand_possible_qualifiers (int idx,
1903 const aarch64_opnd_qualifier_seq_t *list,
1904 enum aarch64_opnd_qualifier *qualifiers)
1905 {
1906 int i;
1907 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1908 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1909 break;
1910 }
1911
1912 /* Decode the size Q field for e.g. SHADD.
1913 We tag one operand with the qualifer according to the code;
1914 whether the qualifier is valid for this opcode or not, it is the
1915 duty of the semantic checking. */
1916
1917 static int
1918 decode_sizeq (aarch64_inst *inst)
1919 {
1920 int idx;
1921 enum aarch64_opnd_qualifier qualifier;
1922 aarch64_insn code;
1923 aarch64_insn value, mask;
1924 enum aarch64_field_kind fld_sz;
1925 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1926
1927 if (inst->opcode->iclass == asisdlse
1928 || inst->opcode->iclass == asisdlsep
1929 || inst->opcode->iclass == asisdlso
1930 || inst->opcode->iclass == asisdlsop)
1931 fld_sz = FLD_vldst_size;
1932 else
1933 fld_sz = FLD_size;
1934
1935 code = inst->value;
1936 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1937 /* Obtain the info that which bits of fields Q and size are actually
1938 available for operand encoding. Opcodes like FMAXNM and FMLA have
1939 size[1] unavailable. */
1940 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1941
1942 /* The index of the operand we are going to tag a qualifier and the qualifer
1943 itself are reasoned from the value of the size and Q fields and the
1944 possible valid qualifier lists. */
1945 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1946 DEBUG_TRACE ("key idx: %d", idx);
1947
1948 /* For most related instruciton, size:Q are fully available for operand
1949 encoding. */
1950 if (mask == 0x7)
1951 {
1952 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1953 return 1;
1954 }
1955
1956 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1957 candidates);
1958 #ifdef DEBUG_AARCH64
1959 if (debug_dump)
1960 {
1961 int i;
1962 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1963 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1964 DEBUG_TRACE ("qualifier %d: %s", i,
1965 aarch64_get_qualifier_name(candidates[i]));
1966 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1967 }
1968 #endif /* DEBUG_AARCH64 */
1969
1970 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1971
1972 if (qualifier == AARCH64_OPND_QLF_NIL)
1973 return 0;
1974
1975 inst->operands[idx].qualifier = qualifier;
1976 return 1;
1977 }
1978
1979 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1980 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1981
1982 static int
1983 decode_asimd_fcvt (aarch64_inst *inst)
1984 {
1985 aarch64_field field = {0, 0};
1986 aarch64_insn value;
1987 enum aarch64_opnd_qualifier qualifier;
1988
1989 gen_sub_field (FLD_size, 0, 1, &field);
1990 value = extract_field_2 (&field, inst->value, 0);
1991 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1992 : AARCH64_OPND_QLF_V_2D;
1993 switch (inst->opcode->op)
1994 {
1995 case OP_FCVTN:
1996 case OP_FCVTN2:
1997 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1998 inst->operands[1].qualifier = qualifier;
1999 break;
2000 case OP_FCVTL:
2001 case OP_FCVTL2:
2002 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
2003 inst->operands[0].qualifier = qualifier;
2004 break;
2005 default:
2006 assert (0);
2007 return 0;
2008 }
2009
2010 return 1;
2011 }
2012
2013 /* Decode size[0], i.e. bit 22, for
2014 e.g. FCVTXN <Vb><d>, <Va><n>. */
2015
2016 static int
2017 decode_asisd_fcvtxn (aarch64_inst *inst)
2018 {
2019 aarch64_field field = {0, 0};
2020 gen_sub_field (FLD_size, 0, 1, &field);
2021 if (!extract_field_2 (&field, inst->value, 0))
2022 return 0;
2023 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2024 return 1;
2025 }
2026
2027 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
2028 static int
2029 decode_fcvt (aarch64_inst *inst)
2030 {
2031 enum aarch64_opnd_qualifier qualifier;
2032 aarch64_insn value;
2033 const aarch64_field field = {15, 2};
2034
2035 /* opc dstsize */
2036 value = extract_field_2 (&field, inst->value, 0);
2037 switch (value)
2038 {
2039 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2040 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2041 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2042 default: return 0;
2043 }
2044 inst->operands[0].qualifier = qualifier;
2045
2046 return 1;
2047 }
2048
2049 /* Do miscellaneous decodings that are not common enough to be driven by
2050 flags. */
2051
2052 static int
2053 do_misc_decoding (aarch64_inst *inst)
2054 {
2055 unsigned int value;
2056 switch (inst->opcode->op)
2057 {
2058 case OP_FCVT:
2059 return decode_fcvt (inst);
2060
2061 case OP_FCVTN:
2062 case OP_FCVTN2:
2063 case OP_FCVTL:
2064 case OP_FCVTL2:
2065 return decode_asimd_fcvt (inst);
2066
2067 case OP_FCVTXN_S:
2068 return decode_asisd_fcvtxn (inst);
2069
2070 case OP_MOV_P_P:
2071 case OP_MOVS_P_P:
2072 value = extract_field (FLD_SVE_Pn, inst->value, 0);
2073 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
2074 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2075
2076 case OP_MOV_Z_P_Z:
2077 return (extract_field (FLD_SVE_Zd, inst->value, 0)
2078 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2079
2080 case OP_MOV_Z_V:
2081 /* Index must be zero. */
2082 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2083 return value > 0 && value <= 16 && value == (value & -value);
2084
2085 case OP_MOV_Z_Z:
2086 return (extract_field (FLD_SVE_Zn, inst->value, 0)
2087 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2088
2089 case OP_MOV_Z_Zi:
2090 /* Index must be nonzero. */
2091 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2092 return value > 0 && value != (value & -value);
2093
2094 case OP_MOVM_P_P_P:
2095 return (extract_field (FLD_SVE_Pd, inst->value, 0)
2096 == extract_field (FLD_SVE_Pm, inst->value, 0));
2097
2098 case OP_MOVZS_P_P_P:
2099 case OP_MOVZ_P_P_P:
2100 return (extract_field (FLD_SVE_Pn, inst->value, 0)
2101 == extract_field (FLD_SVE_Pm, inst->value, 0));
2102
2103 case OP_NOTS_P_P_P_Z:
2104 case OP_NOT_P_P_P_Z:
2105 return (extract_field (FLD_SVE_Pm, inst->value, 0)
2106 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2107
2108 default:
2109 return 0;
2110 }
2111 }
2112
2113 /* Opcodes that have fields shared by multiple operands are usually flagged
2114 with flags. In this function, we detect such flags, decode the related
2115 field(s) and store the information in one of the related operands. The
2116 'one' operand is not any operand but one of the operands that can
2117 accommadate all the information that has been decoded. */
2118
2119 static int
2120 do_special_decoding (aarch64_inst *inst)
2121 {
2122 int idx;
2123 aarch64_insn value;
2124 /* Condition for truly conditional executed instructions, e.g. b.cond. */
2125 if (inst->opcode->flags & F_COND)
2126 {
2127 value = extract_field (FLD_cond2, inst->value, 0);
2128 inst->cond = get_cond_from_value (value);
2129 }
2130 /* 'sf' field. */
2131 if (inst->opcode->flags & F_SF)
2132 {
2133 idx = select_operand_for_sf_field_coding (inst->opcode);
2134 value = extract_field (FLD_sf, inst->value, 0);
2135 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2136 if ((inst->opcode->flags & F_N)
2137 && extract_field (FLD_N, inst->value, 0) != value)
2138 return 0;
2139 }
2140 /* 'sf' field. */
2141 if (inst->opcode->flags & F_LSE_SZ)
2142 {
2143 idx = select_operand_for_sf_field_coding (inst->opcode);
2144 value = extract_field (FLD_lse_sz, inst->value, 0);
2145 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2146 }
2147 /* size:Q fields. */
2148 if (inst->opcode->flags & F_SIZEQ)
2149 return decode_sizeq (inst);
2150
2151 if (inst->opcode->flags & F_FPTYPE)
2152 {
2153 idx = select_operand_for_fptype_field_coding (inst->opcode);
2154 value = extract_field (FLD_type, inst->value, 0);
2155 switch (value)
2156 {
2157 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2158 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2159 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2160 default: return 0;
2161 }
2162 }
2163
2164 if (inst->opcode->flags & F_SSIZE)
2165 {
2166 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
2167 of the base opcode. */
2168 aarch64_insn mask;
2169 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2170 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2171 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2172 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2173 /* For most related instruciton, the 'size' field is fully available for
2174 operand encoding. */
2175 if (mask == 0x3)
2176 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2177 else
2178 {
2179 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2180 candidates);
2181 inst->operands[idx].qualifier
2182 = get_qualifier_from_partial_encoding (value, candidates, mask);
2183 }
2184 }
2185
2186 if (inst->opcode->flags & F_T)
2187 {
2188 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2189 int num = 0;
2190 unsigned val, Q;
2191 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2192 == AARCH64_OPND_CLASS_SIMD_REG);
2193 /* imm5<3:0> q <t>
2194 0000 x reserved
2195 xxx1 0 8b
2196 xxx1 1 16b
2197 xx10 0 4h
2198 xx10 1 8h
2199 x100 0 2s
2200 x100 1 4s
2201 1000 0 reserved
2202 1000 1 2d */
2203 val = extract_field (FLD_imm5, inst->value, 0);
2204 while ((val & 0x1) == 0 && ++num <= 3)
2205 val >>= 1;
2206 if (num > 3)
2207 return 0;
2208 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2209 inst->operands[0].qualifier =
2210 get_vreg_qualifier_from_value ((num << 1) | Q);
2211 }
2212
2213 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2214 {
2215 /* Use Rt to encode in the case of e.g.
2216 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2217 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2218 if (idx == -1)
2219 {
2220 /* Otherwise use the result operand, which has to be a integer
2221 register. */
2222 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2223 == AARCH64_OPND_CLASS_INT_REG);
2224 idx = 0;
2225 }
2226 assert (idx == 0 || idx == 1);
2227 value = extract_field (FLD_Q, inst->value, 0);
2228 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2229 }
2230
2231 if (inst->opcode->flags & F_LDS_SIZE)
2232 {
2233 aarch64_field field = {0, 0};
2234 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2235 == AARCH64_OPND_CLASS_INT_REG);
2236 gen_sub_field (FLD_opc, 0, 1, &field);
2237 value = extract_field_2 (&field, inst->value, 0);
2238 inst->operands[0].qualifier
2239 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2240 }
2241
2242 /* Miscellaneous decoding; done as the last step. */
2243 if (inst->opcode->flags & F_MISC)
2244 return do_misc_decoding (inst);
2245
2246 return 1;
2247 }
2248
2249 /* Converters converting a real opcode instruction to its alias form. */
2250
2251 /* ROR <Wd>, <Ws>, #<shift>
2252 is equivalent to:
2253 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2254 static int
2255 convert_extr_to_ror (aarch64_inst *inst)
2256 {
2257 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2258 {
2259 copy_operand_info (inst, 2, 3);
2260 inst->operands[3].type = AARCH64_OPND_NIL;
2261 return 1;
2262 }
2263 return 0;
2264 }
2265
2266 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2267 is equivalent to:
2268 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2269 static int
2270 convert_shll_to_xtl (aarch64_inst *inst)
2271 {
2272 if (inst->operands[2].imm.value == 0)
2273 {
2274 inst->operands[2].type = AARCH64_OPND_NIL;
2275 return 1;
2276 }
2277 return 0;
2278 }
2279
2280 /* Convert
2281 UBFM <Xd>, <Xn>, #<shift>, #63.
2282 to
2283 LSR <Xd>, <Xn>, #<shift>. */
2284 static int
2285 convert_bfm_to_sr (aarch64_inst *inst)
2286 {
2287 int64_t imms, val;
2288
2289 imms = inst->operands[3].imm.value;
2290 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2291 if (imms == val)
2292 {
2293 inst->operands[3].type = AARCH64_OPND_NIL;
2294 return 1;
2295 }
2296
2297 return 0;
2298 }
2299
2300 /* Convert MOV to ORR. */
2301 static int
2302 convert_orr_to_mov (aarch64_inst *inst)
2303 {
2304 /* MOV <Vd>.<T>, <Vn>.<T>
2305 is equivalent to:
2306 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2307 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2308 {
2309 inst->operands[2].type = AARCH64_OPND_NIL;
2310 return 1;
2311 }
2312 return 0;
2313 }
2314
2315 /* When <imms> >= <immr>, the instruction written:
2316 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2317 is equivalent to:
2318 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2319
2320 static int
2321 convert_bfm_to_bfx (aarch64_inst *inst)
2322 {
2323 int64_t immr, imms;
2324
2325 immr = inst->operands[2].imm.value;
2326 imms = inst->operands[3].imm.value;
2327 if (imms >= immr)
2328 {
2329 int64_t lsb = immr;
2330 inst->operands[2].imm.value = lsb;
2331 inst->operands[3].imm.value = imms + 1 - lsb;
2332 /* The two opcodes have different qualifiers for
2333 the immediate operands; reset to help the checking. */
2334 reset_operand_qualifier (inst, 2);
2335 reset_operand_qualifier (inst, 3);
2336 return 1;
2337 }
2338
2339 return 0;
2340 }
2341
2342 /* When <imms> < <immr>, the instruction written:
2343 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2344 is equivalent to:
2345 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2346
2347 static int
2348 convert_bfm_to_bfi (aarch64_inst *inst)
2349 {
2350 int64_t immr, imms, val;
2351
2352 immr = inst->operands[2].imm.value;
2353 imms = inst->operands[3].imm.value;
2354 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2355 if (imms < immr)
2356 {
2357 inst->operands[2].imm.value = (val - immr) & (val - 1);
2358 inst->operands[3].imm.value = imms + 1;
2359 /* The two opcodes have different qualifiers for
2360 the immediate operands; reset to help the checking. */
2361 reset_operand_qualifier (inst, 2);
2362 reset_operand_qualifier (inst, 3);
2363 return 1;
2364 }
2365
2366 return 0;
2367 }
2368
2369 /* The instruction written:
2370 BFC <Xd>, #<lsb>, #<width>
2371 is equivalent to:
2372 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2373
2374 static int
2375 convert_bfm_to_bfc (aarch64_inst *inst)
2376 {
2377 int64_t immr, imms, val;
2378
2379 /* Should have been assured by the base opcode value. */
2380 assert (inst->operands[1].reg.regno == 0x1f);
2381
2382 immr = inst->operands[2].imm.value;
2383 imms = inst->operands[3].imm.value;
2384 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2385 if (imms < immr)
2386 {
2387 /* Drop XZR from the second operand. */
2388 copy_operand_info (inst, 1, 2);
2389 copy_operand_info (inst, 2, 3);
2390 inst->operands[3].type = AARCH64_OPND_NIL;
2391
2392 /* Recalculate the immediates. */
2393 inst->operands[1].imm.value = (val - immr) & (val - 1);
2394 inst->operands[2].imm.value = imms + 1;
2395
2396 /* The two opcodes have different qualifiers for the operands; reset to
2397 help the checking. */
2398 reset_operand_qualifier (inst, 1);
2399 reset_operand_qualifier (inst, 2);
2400 reset_operand_qualifier (inst, 3);
2401
2402 return 1;
2403 }
2404
2405 return 0;
2406 }
2407
2408 /* The instruction written:
2409 LSL <Xd>, <Xn>, #<shift>
2410 is equivalent to:
2411 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2412
2413 static int
2414 convert_ubfm_to_lsl (aarch64_inst *inst)
2415 {
2416 int64_t immr = inst->operands[2].imm.value;
2417 int64_t imms = inst->operands[3].imm.value;
2418 int64_t val
2419 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2420
2421 if ((immr == 0 && imms == val) || immr == imms + 1)
2422 {
2423 inst->operands[3].type = AARCH64_OPND_NIL;
2424 inst->operands[2].imm.value = val - imms;
2425 return 1;
2426 }
2427
2428 return 0;
2429 }
2430
2431 /* CINC <Wd>, <Wn>, <cond>
2432 is equivalent to:
2433 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2434 where <cond> is not AL or NV. */
2435
2436 static int
2437 convert_from_csel (aarch64_inst *inst)
2438 {
2439 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2440 && (inst->operands[3].cond->value & 0xe) != 0xe)
2441 {
2442 copy_operand_info (inst, 2, 3);
2443 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2444 inst->operands[3].type = AARCH64_OPND_NIL;
2445 return 1;
2446 }
2447 return 0;
2448 }
2449
2450 /* CSET <Wd>, <cond>
2451 is equivalent to:
2452 CSINC <Wd>, WZR, WZR, invert(<cond>)
2453 where <cond> is not AL or NV. */
2454
2455 static int
2456 convert_csinc_to_cset (aarch64_inst *inst)
2457 {
2458 if (inst->operands[1].reg.regno == 0x1f
2459 && inst->operands[2].reg.regno == 0x1f
2460 && (inst->operands[3].cond->value & 0xe) != 0xe)
2461 {
2462 copy_operand_info (inst, 1, 3);
2463 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2464 inst->operands[3].type = AARCH64_OPND_NIL;
2465 inst->operands[2].type = AARCH64_OPND_NIL;
2466 return 1;
2467 }
2468 return 0;
2469 }
2470
2471 /* MOV <Wd>, #<imm>
2472 is equivalent to:
2473 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2474
2475 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2476 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2477 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2478 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2479 machine-instruction mnemonic must be used. */
2480
2481 static int
2482 convert_movewide_to_mov (aarch64_inst *inst)
2483 {
2484 uint64_t value = inst->operands[1].imm.value;
2485 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2486 if (value == 0 && inst->operands[1].shifter.amount != 0)
2487 return 0;
2488 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2489 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2490 value <<= inst->operands[1].shifter.amount;
2491 /* As an alias convertor, it has to be clear that the INST->OPCODE
2492 is the opcode of the real instruction. */
2493 if (inst->opcode->op == OP_MOVN)
2494 {
2495 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2496 value = ~value;
2497 /* A MOVN has an immediate that could be encoded by MOVZ. */
2498 if (aarch64_wide_constant_p (value, is32, NULL))
2499 return 0;
2500 }
2501 inst->operands[1].imm.value = value;
2502 inst->operands[1].shifter.amount = 0;
2503 return 1;
2504 }
2505
2506 /* MOV <Wd>, #<imm>
2507 is equivalent to:
2508 ORR <Wd>, WZR, #<imm>.
2509
2510 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2511 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2512 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2513 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2514 machine-instruction mnemonic must be used. */
2515
2516 static int
2517 convert_movebitmask_to_mov (aarch64_inst *inst)
2518 {
2519 int is32;
2520 uint64_t value;
2521
2522 /* Should have been assured by the base opcode value. */
2523 assert (inst->operands[1].reg.regno == 0x1f);
2524 copy_operand_info (inst, 1, 2);
2525 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2526 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2527 value = inst->operands[1].imm.value;
2528 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2529 instruction. */
2530 if (inst->operands[0].reg.regno != 0x1f
2531 && (aarch64_wide_constant_p (value, is32, NULL)
2532 || aarch64_wide_constant_p (~value, is32, NULL)))
2533 return 0;
2534
2535 inst->operands[2].type = AARCH64_OPND_NIL;
2536 return 1;
2537 }
2538
2539 /* Some alias opcodes are disassembled by being converted from their real-form.
2540 N.B. INST->OPCODE is the real opcode rather than the alias. */
2541
2542 static int
2543 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2544 {
2545 switch (alias->op)
2546 {
2547 case OP_ASR_IMM:
2548 case OP_LSR_IMM:
2549 return convert_bfm_to_sr (inst);
2550 case OP_LSL_IMM:
2551 return convert_ubfm_to_lsl (inst);
2552 case OP_CINC:
2553 case OP_CINV:
2554 case OP_CNEG:
2555 return convert_from_csel (inst);
2556 case OP_CSET:
2557 case OP_CSETM:
2558 return convert_csinc_to_cset (inst);
2559 case OP_UBFX:
2560 case OP_BFXIL:
2561 case OP_SBFX:
2562 return convert_bfm_to_bfx (inst);
2563 case OP_SBFIZ:
2564 case OP_BFI:
2565 case OP_UBFIZ:
2566 return convert_bfm_to_bfi (inst);
2567 case OP_BFC:
2568 return convert_bfm_to_bfc (inst);
2569 case OP_MOV_V:
2570 return convert_orr_to_mov (inst);
2571 case OP_MOV_IMM_WIDE:
2572 case OP_MOV_IMM_WIDEN:
2573 return convert_movewide_to_mov (inst);
2574 case OP_MOV_IMM_LOG:
2575 return convert_movebitmask_to_mov (inst);
2576 case OP_ROR_IMM:
2577 return convert_extr_to_ror (inst);
2578 case OP_SXTL:
2579 case OP_SXTL2:
2580 case OP_UXTL:
2581 case OP_UXTL2:
2582 return convert_shll_to_xtl (inst);
2583 default:
2584 return 0;
2585 }
2586 }
2587
2588 static bfd_boolean
2589 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2590 aarch64_inst *, int, aarch64_operand_error *errors);
2591
2592 /* Given the instruction information in *INST, check if the instruction has
2593 any alias form that can be used to represent *INST. If the answer is yes,
2594 update *INST to be in the form of the determined alias. */
2595
2596 /* In the opcode description table, the following flags are used in opcode
2597 entries to help establish the relations between the real and alias opcodes:
2598
2599 F_ALIAS: opcode is an alias
2600 F_HAS_ALIAS: opcode has alias(es)
2601 F_P1
2602 F_P2
2603 F_P3: Disassembly preference priority 1-3 (the larger the
2604 higher). If nothing is specified, it is the priority
2605 0 by default, i.e. the lowest priority.
2606
2607 Although the relation between the machine and the alias instructions are not
2608 explicitly described, it can be easily determined from the base opcode
2609 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2610 description entries:
2611
2612 The mask of an alias opcode must be equal to or a super-set (i.e. more
2613 constrained) of that of the aliased opcode; so is the base opcode value.
2614
2615 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2616 && (opcode->mask & real->mask) == real->mask
2617 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2618 then OPCODE is an alias of, and only of, the REAL instruction
2619
2620 The alias relationship is forced flat-structured to keep related algorithm
2621 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2622
2623 During the disassembling, the decoding decision tree (in
2624 opcodes/aarch64-dis-2.c) always returns an machine instruction opcode entry;
2625 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2626 not specified), the disassembler will check whether there is any alias
2627 instruction exists for this real instruction. If there is, the disassembler
2628 will try to disassemble the 32-bit binary again using the alias's rule, or
2629 try to convert the IR to the form of the alias. In the case of the multiple
2630 aliases, the aliases are tried one by one from the highest priority
2631 (currently the flag F_P3) to the lowest priority (no priority flag), and the
2632 first succeeds first adopted.
2633
2634 You may ask why there is a need for the conversion of IR from one form to
2635 another in handling certain aliases. This is because on one hand it avoids
2636 adding more operand code to handle unusual encoding/decoding; on other
2637 hand, during the disassembling, the conversion is an effective approach to
2638 check the condition of an alias (as an alias may be adopted only if certain
2639 conditions are met).
2640
2641 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2642 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2643 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
2644
2645 static void
2646 determine_disassembling_preference (struct aarch64_inst *inst,
2647 aarch64_operand_error *errors)
2648 {
2649 const aarch64_opcode *opcode;
2650 const aarch64_opcode *alias;
2651
2652 opcode = inst->opcode;
2653
2654 /* This opcode does not have an alias, so use itself. */
2655 if (!opcode_has_alias (opcode))
2656 return;
2657
2658 alias = aarch64_find_alias_opcode (opcode);
2659 assert (alias);
2660
2661 #ifdef DEBUG_AARCH64
2662 if (debug_dump)
2663 {
2664 const aarch64_opcode *tmp = alias;
2665 printf ("#### LIST orderd: ");
2666 while (tmp)
2667 {
2668 printf ("%s, ", tmp->name);
2669 tmp = aarch64_find_next_alias_opcode (tmp);
2670 }
2671 printf ("\n");
2672 }
2673 #endif /* DEBUG_AARCH64 */
2674
2675 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2676 {
2677 DEBUG_TRACE ("try %s", alias->name);
2678 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2679
2680 /* An alias can be a pseudo opcode which will never be used in the
2681 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2682 aliasing AND. */
2683 if (pseudo_opcode_p (alias))
2684 {
2685 DEBUG_TRACE ("skip pseudo %s", alias->name);
2686 continue;
2687 }
2688
2689 if ((inst->value & alias->mask) != alias->opcode)
2690 {
2691 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2692 continue;
2693 }
2694
2695 if (!AARCH64_CPU_HAS_FEATURE (arch_variant, *alias->avariant))
2696 {
2697 DEBUG_TRACE ("skip %s: we're missing features", alias->name);
2698 continue;
2699 }
2700
2701 /* No need to do any complicated transformation on operands, if the alias
2702 opcode does not have any operand. */
2703 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2704 {
2705 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2706 aarch64_replace_opcode (inst, alias);
2707 return;
2708 }
2709 if (alias->flags & F_CONV)
2710 {
2711 aarch64_inst copy;
2712 memcpy (&copy, inst, sizeof (aarch64_inst));
2713 /* ALIAS is the preference as long as the instruction can be
2714 successfully converted to the form of ALIAS. */
2715 if (convert_to_alias (&copy, alias) == 1)
2716 {
2717 aarch64_replace_opcode (&copy, alias);
2718 assert (aarch64_match_operands_constraint (&copy, NULL));
2719 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2720 memcpy (inst, &copy, sizeof (aarch64_inst));
2721 return;
2722 }
2723 }
2724 else
2725 {
2726 /* Directly decode the alias opcode. */
2727 aarch64_inst temp;
2728 memset (&temp, '\0', sizeof (aarch64_inst));
2729 if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
2730 {
2731 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2732 memcpy (inst, &temp, sizeof (aarch64_inst));
2733 return;
2734 }
2735 }
2736 }
2737 }
2738
2739 /* Some instructions (including all SVE ones) use the instruction class
2740 to describe how a qualifiers_list index is represented in the instruction
2741 encoding. If INST is such an instruction, decode the appropriate fields
2742 and fill in the operand qualifiers accordingly. Return true if no
2743 problems are found. */
2744
2745 static bfd_boolean
2746 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2747 {
2748 int i, variant;
2749
2750 variant = 0;
2751 switch (inst->opcode->iclass)
2752 {
2753 case sve_cpy:
2754 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2755 break;
2756
2757 case sve_index:
2758 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2759 if ((i & 31) == 0)
2760 return FALSE;
2761 while ((i & 1) == 0)
2762 {
2763 i >>= 1;
2764 variant += 1;
2765 }
2766 break;
2767
2768 case sve_limm:
2769 /* Pick the smallest applicable element size. */
2770 if ((inst->value & 0x20600) == 0x600)
2771 variant = 0;
2772 else if ((inst->value & 0x20400) == 0x400)
2773 variant = 1;
2774 else if ((inst->value & 0x20000) == 0)
2775 variant = 2;
2776 else
2777 variant = 3;
2778 break;
2779
2780 case sve_misc:
2781 /* sve_misc instructions have only a single variant. */
2782 break;
2783
2784 case sve_movprfx:
2785 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2786 break;
2787
2788 case sve_pred_zm:
2789 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2790 break;
2791
2792 case sve_shift_pred:
2793 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2794 sve_shift:
2795 if (i == 0)
2796 return FALSE;
2797 while (i != 1)
2798 {
2799 i >>= 1;
2800 variant += 1;
2801 }
2802 break;
2803
2804 case sve_shift_unpred:
2805 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2806 goto sve_shift;
2807
2808 case sve_size_bhs:
2809 variant = extract_field (FLD_size, inst->value, 0);
2810 if (variant >= 3)
2811 return FALSE;
2812 break;
2813
2814 case sve_size_bhsd:
2815 variant = extract_field (FLD_size, inst->value, 0);
2816 break;
2817
2818 case sve_size_hsd:
2819 i = extract_field (FLD_size, inst->value, 0);
2820 if (i < 1)
2821 return FALSE;
2822 variant = i - 1;
2823 break;
2824
2825 case sve_size_bh:
2826 case sve_size_sd:
2827 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2828 break;
2829
2830 case sve_size_sd2:
2831 variant = extract_field (FLD_SVE_sz2, inst->value, 0);
2832 break;
2833
2834 case sve_size_hsd2:
2835 i = extract_field (FLD_SVE_size, inst->value, 0);
2836 if (i < 1)
2837 return FALSE;
2838 variant = i - 1;
2839 break;
2840
2841 case sve_size_13:
2842 /* Ignore low bit of this field since that is set in the opcode for
2843 instructions of this iclass. */
2844 i = (extract_field (FLD_size, inst->value, 0) & 2);
2845 variant = (i >> 1);
2846 break;
2847
2848 case sve_shift_tsz_bhsd:
2849 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2850 if (i == 0)
2851 return FALSE;
2852 while (i != 1)
2853 {
2854 i >>= 1;
2855 variant += 1;
2856 }
2857 break;
2858
2859 case sve_size_tsz_bhs:
2860 i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
2861 if (i == 0)
2862 return FALSE;
2863 while (i != 1)
2864 {
2865 if (i & 1)
2866 return FALSE;
2867 i >>= 1;
2868 variant += 1;
2869 }
2870 break;
2871
2872 case sve_shift_tsz_hsd:
2873 i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
2874 if (i == 0)
2875 return FALSE;
2876 while (i != 1)
2877 {
2878 i >>= 1;
2879 variant += 1;
2880 }
2881 break;
2882
2883 default:
2884 /* No mapping between instruction class and qualifiers. */
2885 return TRUE;
2886 }
2887
2888 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2889 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2890 return TRUE;
2891 }
2892 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2893 fails, which meanes that CODE is not an instruction of OPCODE; otherwise
2894 return 1.
2895
2896 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2897 determined and used to disassemble CODE; this is done just before the
2898 return. */
2899
2900 static bfd_boolean
2901 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2902 aarch64_inst *inst, int noaliases_p,
2903 aarch64_operand_error *errors)
2904 {
2905 int i;
2906
2907 DEBUG_TRACE ("enter with %s", opcode->name);
2908
2909 assert (opcode && inst);
2910
2911 /* Clear inst. */
2912 memset (inst, '\0', sizeof (aarch64_inst));
2913
2914 /* Check the base opcode. */
2915 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2916 {
2917 DEBUG_TRACE ("base opcode match FAIL");
2918 goto decode_fail;
2919 }
2920
2921 inst->opcode = opcode;
2922 inst->value = code;
2923
2924 /* Assign operand codes and indexes. */
2925 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2926 {
2927 if (opcode->operands[i] == AARCH64_OPND_NIL)
2928 break;
2929 inst->operands[i].type = opcode->operands[i];
2930 inst->operands[i].idx = i;
2931 }
2932
2933 /* Call the opcode decoder indicated by flags. */
2934 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2935 {
2936 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2937 goto decode_fail;
2938 }
2939
2940 /* Possibly use the instruction class to determine the correct
2941 qualifier. */
2942 if (!aarch64_decode_variant_using_iclass (inst))
2943 {
2944 DEBUG_TRACE ("iclass-based decoder FAIL");
2945 goto decode_fail;
2946 }
2947
2948 /* Call operand decoders. */
2949 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2950 {
2951 const aarch64_operand *opnd;
2952 enum aarch64_opnd type;
2953
2954 type = opcode->operands[i];
2955 if (type == AARCH64_OPND_NIL)
2956 break;
2957 opnd = &aarch64_operands[type];
2958 if (operand_has_extractor (opnd)
2959 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
2960 errors)))
2961 {
2962 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2963 goto decode_fail;
2964 }
2965 }
2966
2967 /* If the opcode has a verifier, then check it now. */
2968 if (opcode->verifier
2969 && opcode->verifier (inst, code, 0, FALSE, errors, NULL) != ERR_OK)
2970 {
2971 DEBUG_TRACE ("operand verifier FAIL");
2972 goto decode_fail;
2973 }
2974
2975 /* Match the qualifiers. */
2976 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2977 {
2978 /* Arriving here, the CODE has been determined as a valid instruction
2979 of OPCODE and *INST has been filled with information of this OPCODE
2980 instruction. Before the return, check if the instruction has any
2981 alias and should be disassembled in the form of its alias instead.
2982 If the answer is yes, *INST will be updated. */
2983 if (!noaliases_p)
2984 determine_disassembling_preference (inst, errors);
2985 DEBUG_TRACE ("SUCCESS");
2986 return TRUE;
2987 }
2988 else
2989 {
2990 DEBUG_TRACE ("constraint matching FAIL");
2991 }
2992
2993 decode_fail:
2994 return FALSE;
2995 }
2996 \f
2997 /* This does some user-friendly fix-up to *INST. It is currently focus on
2998 the adjustment of qualifiers to help the printed instruction
2999 recognized/understood more easily. */
3000
3001 static void
3002 user_friendly_fixup (aarch64_inst *inst)
3003 {
3004 switch (inst->opcode->iclass)
3005 {
3006 case testbranch:
3007 /* TBNZ Xn|Wn, #uimm6, label
3008 Test and Branch Not Zero: conditionally jumps to label if bit number
3009 uimm6 in register Xn is not zero. The bit number implies the width of
3010 the register, which may be written and should be disassembled as Wn if
3011 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
3012 */
3013 if (inst->operands[1].imm.value < 32)
3014 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
3015 break;
3016 default: break;
3017 }
3018 }
3019
3020 /* Decode INSN and fill in *INST the instruction information. An alias
3021 opcode may be filled in *INSN if NOALIASES_P is FALSE. Return zero on
3022 success. */
3023
3024 enum err_type
3025 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
3026 bfd_boolean noaliases_p,
3027 aarch64_operand_error *errors)
3028 {
3029 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
3030
3031 #ifdef DEBUG_AARCH64
3032 if (debug_dump)
3033 {
3034 const aarch64_opcode *tmp = opcode;
3035 printf ("\n");
3036 DEBUG_TRACE ("opcode lookup:");
3037 while (tmp != NULL)
3038 {
3039 aarch64_verbose (" %s", tmp->name);
3040 tmp = aarch64_find_next_opcode (tmp);
3041 }
3042 }
3043 #endif /* DEBUG_AARCH64 */
3044
3045 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
3046 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
3047 opcode field and value, apart from the difference that one of them has an
3048 extra field as part of the opcode, but such a field is used for operand
3049 encoding in other opcode(s) ('immh' in the case of the example). */
3050 while (opcode != NULL)
3051 {
3052 /* But only one opcode can be decoded successfully for, as the
3053 decoding routine will check the constraint carefully. */
3054 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
3055 return ERR_OK;
3056 opcode = aarch64_find_next_opcode (opcode);
3057 }
3058
3059 return ERR_UND;
3060 }
3061
3062 /* Print operands. */
3063
3064 static void
3065 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
3066 const aarch64_opnd_info *opnds, struct disassemble_info *info,
3067 bfd_boolean *has_notes)
3068 {
3069 char *notes = NULL;
3070 int i, pcrel_p, num_printed;
3071 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3072 {
3073 char str[128];
3074 /* We regard the opcode operand info more, however we also look into
3075 the inst->operands to support the disassembling of the optional
3076 operand.
3077 The two operand code should be the same in all cases, apart from
3078 when the operand can be optional. */
3079 if (opcode->operands[i] == AARCH64_OPND_NIL
3080 || opnds[i].type == AARCH64_OPND_NIL)
3081 break;
3082
3083 /* Generate the operand string in STR. */
3084 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
3085 &info->target, &notes);
3086
3087 /* Print the delimiter (taking account of omitted operand(s)). */
3088 if (str[0] != '\0')
3089 (*info->fprintf_func) (info->stream, "%s",
3090 num_printed++ == 0 ? "\t" : ", ");
3091
3092 /* Print the operand. */
3093 if (pcrel_p)
3094 (*info->print_address_func) (info->target, info);
3095 else
3096 (*info->fprintf_func) (info->stream, "%s", str);
3097 }
3098
3099 if (notes && !no_notes)
3100 {
3101 *has_notes = TRUE;
3102 (*info->fprintf_func) (info->stream, " // note: %s", notes);
3103 }
3104 }
3105
3106 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
3107
3108 static void
3109 remove_dot_suffix (char *name, const aarch64_inst *inst)
3110 {
3111 char *ptr;
3112 size_t len;
3113
3114 ptr = strchr (inst->opcode->name, '.');
3115 assert (ptr && inst->cond);
3116 len = ptr - inst->opcode->name;
3117 assert (len < 8);
3118 strncpy (name, inst->opcode->name, len);
3119 name[len] = '\0';
3120 }
3121
3122 /* Print the instruction mnemonic name. */
3123
3124 static void
3125 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3126 {
3127 if (inst->opcode->flags & F_COND)
3128 {
3129 /* For instructions that are truly conditionally executed, e.g. b.cond,
3130 prepare the full mnemonic name with the corresponding condition
3131 suffix. */
3132 char name[8];
3133
3134 remove_dot_suffix (name, inst);
3135 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
3136 }
3137 else
3138 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
3139 }
3140
3141 /* Decide whether we need to print a comment after the operands of
3142 instruction INST. */
3143
3144 static void
3145 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
3146 {
3147 if (inst->opcode->flags & F_COND)
3148 {
3149 char name[8];
3150 unsigned int i, num_conds;
3151
3152 remove_dot_suffix (name, inst);
3153 num_conds = ARRAY_SIZE (inst->cond->names);
3154 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
3155 (*info->fprintf_func) (info->stream, "%s %s.%s",
3156 i == 1 ? " //" : ",",
3157 name, inst->cond->names[i]);
3158 }
3159 }
3160
3161 /* Build notes from verifiers into a string for printing. */
3162
3163 static void
3164 print_verifier_notes (aarch64_operand_error *detail,
3165 struct disassemble_info *info)
3166 {
3167 if (no_notes)
3168 return;
3169
3170 /* The output of the verifier cannot be a fatal error, otherwise the assembly
3171 would not have succeeded. We can safely ignore these. */
3172 assert (detail->non_fatal);
3173 assert (detail->error);
3174
3175 /* If there are multiple verifier messages, concat them up to 1k. */
3176 (*info->fprintf_func) (info->stream, " // note: %s", detail->error);
3177 if (detail->index >= 0)
3178 (*info->fprintf_func) (info->stream, " at operand %d", detail->index + 1);
3179 }
3180
3181 /* Print the instruction according to *INST. */
3182
3183 static void
3184 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
3185 const aarch64_insn code,
3186 struct disassemble_info *info,
3187 aarch64_operand_error *mismatch_details)
3188 {
3189 bfd_boolean has_notes = FALSE;
3190
3191 print_mnemonic_name (inst, info);
3192 print_operands (pc, inst->opcode, inst->operands, info, &has_notes);
3193 print_comment (inst, info);
3194
3195 /* We've already printed a note, not enough space to print more so exit.
3196 Usually notes shouldn't overlap so it shouldn't happen that we have a note
3197 from a register and instruction at the same time. */
3198 if (has_notes)
3199 return;
3200
3201 /* Always run constraint verifiers, this is needed because constraints need to
3202 maintain a global state regardless of whether the instruction has the flag
3203 set or not. */
3204 enum err_type result = verify_constraints (inst, code, pc, FALSE,
3205 mismatch_details, &insn_sequence);
3206 switch (result)
3207 {
3208 case ERR_UND:
3209 case ERR_UNP:
3210 case ERR_NYI:
3211 assert (0);
3212 case ERR_VFI:
3213 print_verifier_notes (mismatch_details, info);
3214 break;
3215 default:
3216 break;
3217 }
3218 }
3219
3220 /* Entry-point of the instruction disassembler and printer. */
3221
3222 static void
3223 print_insn_aarch64_word (bfd_vma pc,
3224 uint32_t word,
3225 struct disassemble_info *info,
3226 aarch64_operand_error *errors)
3227 {
3228 static const char *err_msg[ERR_NR_ENTRIES+1] =
3229 {
3230 [ERR_OK] = "_",
3231 [ERR_UND] = "undefined",
3232 [ERR_UNP] = "unpredictable",
3233 [ERR_NYI] = "NYI"
3234 };
3235
3236 enum err_type ret;
3237 aarch64_inst inst;
3238
3239 info->insn_info_valid = 1;
3240 info->branch_delay_insns = 0;
3241 info->data_size = 0;
3242 info->target = 0;
3243 info->target2 = 0;
3244
3245 if (info->flags & INSN_HAS_RELOC)
3246 /* If the instruction has a reloc associated with it, then
3247 the offset field in the instruction will actually be the
3248 addend for the reloc. (If we are using REL type relocs).
3249 In such cases, we can ignore the pc when computing
3250 addresses, since the addend is not currently pc-relative. */
3251 pc = 0;
3252
3253 ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
3254
3255 if (((word >> 21) & 0x3ff) == 1)
3256 {
3257 /* RESERVED for ALES. */
3258 assert (ret != ERR_OK);
3259 ret = ERR_NYI;
3260 }
3261
3262 switch (ret)
3263 {
3264 case ERR_UND:
3265 case ERR_UNP:
3266 case ERR_NYI:
3267 /* Handle undefined instructions. */
3268 info->insn_type = dis_noninsn;
3269 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
3270 word, err_msg[ret]);
3271 break;
3272 case ERR_OK:
3273 user_friendly_fixup (&inst);
3274 print_aarch64_insn (pc, &inst, word, info, errors);
3275 break;
3276 default:
3277 abort ();
3278 }
3279 }
3280
3281 /* Disallow mapping symbols ($x, $d etc) from
3282 being displayed in symbol relative addresses. */
3283
3284 bfd_boolean
3285 aarch64_symbol_is_valid (asymbol * sym,
3286 struct disassemble_info * info ATTRIBUTE_UNUSED)
3287 {
3288 const char * name;
3289
3290 if (sym == NULL)
3291 return FALSE;
3292
3293 name = bfd_asymbol_name (sym);
3294
3295 return name
3296 && (name[0] != '$'
3297 || (name[1] != 'x' && name[1] != 'd')
3298 || (name[2] != '\0' && name[2] != '.'));
3299 }
3300
3301 /* Print data bytes on INFO->STREAM. */
3302
3303 static void
3304 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3305 uint32_t word,
3306 struct disassemble_info *info,
3307 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
3308 {
3309 switch (info->bytes_per_chunk)
3310 {
3311 case 1:
3312 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3313 break;
3314 case 2:
3315 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3316 break;
3317 case 4:
3318 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3319 break;
3320 default:
3321 abort ();
3322 }
3323 }
3324
3325 /* Try to infer the code or data type from a symbol.
3326 Returns nonzero if *MAP_TYPE was set. */
3327
3328 static int
3329 get_sym_code_type (struct disassemble_info *info, int n,
3330 enum map_type *map_type)
3331 {
3332 asymbol * as;
3333 elf_symbol_type *es;
3334 unsigned int type;
3335 const char *name;
3336
3337 /* If the symbol is in a different section, ignore it. */
3338 if (info->section != NULL && info->section != info->symtab[n]->section)
3339 return FALSE;
3340
3341 if (n >= info->symtab_size)
3342 return FALSE;
3343
3344 as = info->symtab[n];
3345 if (bfd_asymbol_flavour (as) != bfd_target_elf_flavour)
3346 return FALSE;
3347 es = (elf_symbol_type *) as;
3348
3349 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3350
3351 /* If the symbol has function type then use that. */
3352 if (type == STT_FUNC)
3353 {
3354 *map_type = MAP_INSN;
3355 return TRUE;
3356 }
3357
3358 /* Check for mapping symbols. */
3359 name = bfd_asymbol_name(info->symtab[n]);
3360 if (name[0] == '$'
3361 && (name[1] == 'x' || name[1] == 'd')
3362 && (name[2] == '\0' || name[2] == '.'))
3363 {
3364 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3365 return TRUE;
3366 }
3367
3368 return FALSE;
3369 }
3370
3371 /* Set the feature bits in arch_variant in order to get the correct disassembly
3372 for the chosen architecture variant.
3373
3374 Currently we only restrict disassembly for Armv8-R and otherwise enable all
3375 non-R-profile features. */
3376 static void
3377 select_aarch64_variant (unsigned mach)
3378 {
3379 switch (mach)
3380 {
3381 case bfd_mach_aarch64_8R:
3382 arch_variant = AARCH64_ARCH_V8_R;
3383 break;
3384 default:
3385 arch_variant = AARCH64_ANY & ~(AARCH64_FEATURE_V8_R);
3386 }
3387 }
3388
3389 /* Entry-point of the AArch64 disassembler. */
3390
3391 int
3392 print_insn_aarch64 (bfd_vma pc,
3393 struct disassemble_info *info)
3394 {
3395 bfd_byte buffer[INSNLEN];
3396 int status;
3397 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *,
3398 aarch64_operand_error *);
3399 bfd_boolean found = FALSE;
3400 unsigned int size = 4;
3401 unsigned long data;
3402 aarch64_operand_error errors;
3403 static bfd_boolean set_features;
3404
3405 if (info->disassembler_options)
3406 {
3407 set_default_aarch64_dis_options (info);
3408
3409 parse_aarch64_dis_options (info->disassembler_options);
3410
3411 /* To avoid repeated parsing of these options, we remove them here. */
3412 info->disassembler_options = NULL;
3413 }
3414
3415 if (!set_features)
3416 {
3417 select_aarch64_variant (info->mach);
3418 set_features = TRUE;
3419 }
3420
3421 /* Aarch64 instructions are always little-endian */
3422 info->endian_code = BFD_ENDIAN_LITTLE;
3423
3424 /* Default to DATA. A text section is required by the ABI to contain an
3425 INSN mapping symbol at the start. A data section has no such
3426 requirement, hence if no mapping symbol is found the section must
3427 contain only data. This however isn't very useful if the user has
3428 fully stripped the binaries. If this is the case use the section
3429 attributes to determine the default. If we have no section default to
3430 INSN as well, as we may be disassembling some raw bytes on a baremetal
3431 HEX file or similar. */
3432 enum map_type type = MAP_DATA;
3433 if ((info->section && info->section->flags & SEC_CODE) || !info->section)
3434 type = MAP_INSN;
3435
3436 /* First check the full symtab for a mapping symbol, even if there
3437 are no usable non-mapping symbols for this address. */
3438 if (info->symtab_size != 0
3439 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3440 {
3441 int last_sym = -1;
3442 bfd_vma addr, section_vma = 0;
3443 bfd_boolean can_use_search_opt_p;
3444 int n;
3445
3446 if (pc <= last_mapping_addr)
3447 last_mapping_sym = -1;
3448
3449 /* Start scanning at the start of the function, or wherever
3450 we finished last time. */
3451 n = info->symtab_pos + 1;
3452
3453 /* If the last stop offset is different from the current one it means we
3454 are disassembling a different glob of bytes. As such the optimization
3455 would not be safe and we should start over. */
3456 can_use_search_opt_p = last_mapping_sym >= 0
3457 && info->stop_offset == last_stop_offset;
3458
3459 if (n >= last_mapping_sym && can_use_search_opt_p)
3460 n = last_mapping_sym;
3461
3462 /* Look down while we haven't passed the location being disassembled.
3463 The reason for this is that there's no defined order between a symbol
3464 and an mapping symbol that may be at the same address. We may have to
3465 look at least one position ahead. */
3466 for (; n < info->symtab_size; n++)
3467 {
3468 addr = bfd_asymbol_value (info->symtab[n]);
3469 if (addr > pc)
3470 break;
3471 if (get_sym_code_type (info, n, &type))
3472 {
3473 last_sym = n;
3474 found = TRUE;
3475 }
3476 }
3477
3478 if (!found)
3479 {
3480 n = info->symtab_pos;
3481 if (n >= last_mapping_sym && can_use_search_opt_p)
3482 n = last_mapping_sym;
3483
3484 /* No mapping symbol found at this address. Look backwards
3485 for a preceeding one, but don't go pass the section start
3486 otherwise a data section with no mapping symbol can pick up
3487 a text mapping symbol of a preceeding section. The documentation
3488 says section can be NULL, in which case we will seek up all the
3489 way to the top. */
3490 if (info->section)
3491 section_vma = info->section->vma;
3492
3493 for (; n >= 0; n--)
3494 {
3495 addr = bfd_asymbol_value (info->symtab[n]);
3496 if (addr < section_vma)
3497 break;
3498
3499 if (get_sym_code_type (info, n, &type))
3500 {
3501 last_sym = n;
3502 found = TRUE;
3503 break;
3504 }
3505 }
3506 }
3507
3508 last_mapping_sym = last_sym;
3509 last_type = type;
3510 last_stop_offset = info->stop_offset;
3511
3512 /* Look a little bit ahead to see if we should print out
3513 less than four bytes of data. If there's a symbol,
3514 mapping or otherwise, after two bytes then don't
3515 print more. */
3516 if (last_type == MAP_DATA)
3517 {
3518 size = 4 - (pc & 3);
3519 for (n = last_sym + 1; n < info->symtab_size; n++)
3520 {
3521 addr = bfd_asymbol_value (info->symtab[n]);
3522 if (addr > pc)
3523 {
3524 if (addr - pc < size)
3525 size = addr - pc;
3526 break;
3527 }
3528 }
3529 /* If the next symbol is after three bytes, we need to
3530 print only part of the data, so that we can use either
3531 .byte or .short. */
3532 if (size == 3)
3533 size = (pc & 1) ? 1 : 2;
3534 }
3535 }
3536 else
3537 last_type = type;
3538
3539 /* PR 10263: Disassemble data if requested to do so by the user. */
3540 if (last_type == MAP_DATA && ((info->flags & DISASSEMBLE_DATA) == 0))
3541 {
3542 /* size was set above. */
3543 info->bytes_per_chunk = size;
3544 info->display_endian = info->endian;
3545 printer = print_insn_data;
3546 }
3547 else
3548 {
3549 info->bytes_per_chunk = size = INSNLEN;
3550 info->display_endian = info->endian_code;
3551 printer = print_insn_aarch64_word;
3552 }
3553
3554 status = (*info->read_memory_func) (pc, buffer, size, info);
3555 if (status != 0)
3556 {
3557 (*info->memory_error_func) (status, pc, info);
3558 return -1;
3559 }
3560
3561 data = bfd_get_bits (buffer, size * 8,
3562 info->display_endian == BFD_ENDIAN_BIG);
3563
3564 (*printer) (pc, data, info, &errors);
3565
3566 return size;
3567 }
3568 \f
3569 void
3570 print_aarch64_disassembler_options (FILE *stream)
3571 {
3572 fprintf (stream, _("\n\
3573 The following AARCH64 specific disassembler options are supported for use\n\
3574 with the -M switch (multiple options should be separated by commas):\n"));
3575
3576 fprintf (stream, _("\n\
3577 no-aliases Don't print instruction aliases.\n"));
3578
3579 fprintf (stream, _("\n\
3580 aliases Do print instruction aliases.\n"));
3581
3582 fprintf (stream, _("\n\
3583 no-notes Don't print instruction notes.\n"));
3584
3585 fprintf (stream, _("\n\
3586 notes Do print instruction notes.\n"));
3587
3588 #ifdef DEBUG_AARCH64
3589 fprintf (stream, _("\n\
3590 debug_dump Temp switch for debug trace.\n"));
3591 #endif /* DEBUG_AARCH64 */
3592
3593 fprintf (stream, _("\n"));
3594 }