1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define INSNLEN 4
30
31 /* Cached mapping symbol state. */
32 enum map_type
33 {
34 MAP_INSN,
35 MAP_DATA
36 };
37
38 static enum map_type last_type;
39 static int last_mapping_sym = -1;
40 static bfd_vma last_stop_offset = 0;
41 static bfd_vma last_mapping_addr = 0;
42
43 /* Other options */
44 static int no_aliases = 0; /* If set disassemble as most general inst. */
45 static int no_notes = 1; /* If set do not print disassemble notes in the
46 output as comments. */
47
48 /* Currently active instruction sequence. */
49 static aarch64_instr_sequence insn_sequence;
50
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
61 {
62 no_aliases = 1;
63 return;
64 }
65
66 if (CONST_STRNEQ (option, "aliases"))
67 {
68 no_aliases = 0;
69 return;
70 }
71
72 if (CONST_STRNEQ (option, "no-notes"))
73 {
74 no_notes = 1;
75 return;
76 }
77
78 if (CONST_STRNEQ (option, "notes"))
79 {
80 no_notes = 0;
81 return;
82 }
83
84 #ifdef DEBUG_AARCH64
85 if (CONST_STRNEQ (option, "debug_dump"))
86 {
87 debug_dump = 1;
88 return;
89 }
90 #endif /* DEBUG_AARCH64 */
91
92 /* Invalid option. */
93 opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
94 }
95
96 static void
97 parse_aarch64_dis_options (const char *options)
98 {
99 const char *option_end;
100
101 if (options == NULL)
102 return;
103
104 while (*options != '\0')
105 {
106 /* Skip empty options. */
107 if (*options == ',')
108 {
109 options++;
110 continue;
111 }
112
113 /* We know that *options is neither NUL nor a comma. */
114 option_end = options + 1;
115 while (*option_end != ',' && *option_end != '\0')
116 option_end++;
117
118 parse_aarch64_dis_option (options, option_end - options);
119
120 /* Go on to the next one. If option_end points to a comma, it
121 will be skipped above. */
122 options = option_end;
123 }
124 }
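/* Illustrative note (not part of the original source): the option string
   handled above is the comma-separated list given to objdump's -M flag,
   e.g. "objdump -d -M no-aliases,notes foo.o" arrives here as the string
   "no-aliases,notes" and sets no_aliases = 1 and no_notes = 0.  */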
125 \f
126 /* Functions doing the instruction disassembling. */
127
128 /* The unnamed arguments consist of the number of fields and information about
129 these fields where the VALUE will be extracted from CODE and returned.
130 MASK can be zero or the base mask of the opcode.
131
132 N.B. the fields are required to be in such an order that the most significant
133 field for VALUE comes first, e.g. the <index> in
134 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
135 is encoded in H:L:M in some cases; the fields should then be passed in
136 the order H, L, M. */
137
138 aarch64_insn
139 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
140 {
141 uint32_t num;
142 const aarch64_field *field;
143 enum aarch64_field_kind kind;
144 va_list va;
145
146 va_start (va, mask);
147 num = va_arg (va, uint32_t);
148 assert (num <= 5);
149 aarch64_insn value = 0x0;
150 while (num--)
151 {
152 kind = va_arg (va, enum aarch64_field_kind);
153 field = &fields[kind];
154 value <<= field->width;
155 value |= extract_field (kind, code, mask);
156 }
157 return value;
158 }
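/* Illustrative sketch (not part of the original source): for an <index>
   encoded in H:L:M, the most significant field (H) is passed first, so

     aarch64_insn index = extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);

   returns the 3-bit concatenation H:L:M; e.g. H=1, L=0, M=1 yields
   index == 0b101 == 5.  */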
159
160 /* Extract the value of all fields in SELF->fields from instruction CODE.
161 The least significant bit comes from the final field. */
162
163 static aarch64_insn
164 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
165 {
166 aarch64_insn value;
167 unsigned int i;
168 enum aarch64_field_kind kind;
169
170 value = 0;
171 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
172 {
173 kind = self->fields[i];
174 value <<= fields[kind].width;
175 value |= extract_field (kind, code, 0);
176 }
177 return value;
178 }
179
180 /* Sign-extend VALUE, treating bit I as the sign bit. */
181 static inline int32_t
182 sign_extend (aarch64_insn value, unsigned i)
183 {
184 uint32_t ret = value;
185
186 assert (i < 32);
187 if ((value >> i) & 0x1)
188 {
189 uint32_t val = (uint32_t)(-1) << i;
190 ret = ret | val;
191 }
192 return (int32_t) ret;
193 }
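/* Illustrative sketch (not part of the original source): sign_extend
   treats bit I as the sign bit, so for a 9-bit simm9 field holding
   0x1f0 (bit 8 set), sign_extend (0x1f0, 8) ORs in 0xffffff00 and
   returns -16, matching the two's-complement reading of the field.  */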
194
195 /* N.B. the following inline helper functions create a dependency on the
196 order of operand qualifier enumerators. */
197
198 /* Given VALUE, return qualifier for a general purpose register. */
199 static inline enum aarch64_opnd_qualifier
200 get_greg_qualifier_from_value (aarch64_insn value)
201 {
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
203 assert (value <= 0x1
204 && aarch64_get_qualifier_standard_value (qualifier) == value);
205 return qualifier;
206 }
207
208 /* Given VALUE, return qualifier for a vector register. This does not support
209 decoding instructions that accept the 2H vector type. */
210
211 static inline enum aarch64_opnd_qualifier
212 get_vreg_qualifier_from_value (aarch64_insn value)
213 {
214 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
215
216 /* Instructions using vector type 2H should not call this function. Skip over
217 the 2H qualifier. */
218 if (qualifier >= AARCH64_OPND_QLF_V_2H)
219 qualifier += 1;
220
221 assert (value <= 0x8
222 && aarch64_get_qualifier_standard_value (qualifier) == value);
223 return qualifier;
224 }
225
226 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
227 static inline enum aarch64_opnd_qualifier
228 get_sreg_qualifier_from_value (aarch64_insn value)
229 {
230 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
231
232 assert (value <= 0x4
233 && aarch64_get_qualifier_standard_value (qualifier) == value);
234 return qualifier;
235 }
236
237 /* Given the instruction in *INST, which is probably half way through
238 decoding, return the qualifier that the caller expects for operand
239 I if we can establish it; otherwise return
240 AARCH64_OPND_QLF_NIL. */
241
242 static aarch64_opnd_qualifier_t
243 get_expected_qualifier (const aarch64_inst *inst, int i)
244 {
245 aarch64_opnd_qualifier_seq_t qualifiers;
246 /* Should not be called if the qualifier is known. */
247 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
248 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
249 i, qualifiers))
250 return qualifiers[i];
251 else
252 return AARCH64_OPND_QLF_NIL;
253 }
254
255 /* Operand extractors. */
256
257 bfd_boolean
258 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
259 const aarch64_insn code,
260 const aarch64_inst *inst ATTRIBUTE_UNUSED,
261 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
262 {
263 info->reg.regno = extract_field (self->fields[0], code, 0);
264 return TRUE;
265 }
266
267 bfd_boolean
268 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
269 const aarch64_insn code ATTRIBUTE_UNUSED,
270 const aarch64_inst *inst ATTRIBUTE_UNUSED,
271 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
272 {
273 assert (info->idx == 1
274 || info->idx == 3);
275 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
276 return TRUE;
277 }
278
279 /* e.g. IC <ic_op>{, <Xt>}. */
280 bfd_boolean
281 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
282 const aarch64_insn code,
283 const aarch64_inst *inst ATTRIBUTE_UNUSED,
284 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
285 {
286 info->reg.regno = extract_field (self->fields[0], code, 0);
287 assert (info->idx == 1
288 && (aarch64_get_operand_class (inst->operands[0].type)
289 == AARCH64_OPND_CLASS_SYSTEM));
290 /* This will make the constraint checking happy and more importantly will
291 help the disassembler determine whether this operand is optional or
292 not. */
293 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
294
295 return TRUE;
296 }
297
298 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
299 bfd_boolean
300 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
301 const aarch64_insn code,
302 const aarch64_inst *inst ATTRIBUTE_UNUSED,
303 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
304 {
305 /* regno */
306 info->reglane.regno = extract_field (self->fields[0], code,
307 inst->opcode->mask);
308
309 /* Index and/or type. */
310 if (inst->opcode->iclass == asisdone
311 || inst->opcode->iclass == asimdins)
312 {
313 if (info->type == AARCH64_OPND_En
314 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
315 {
316 unsigned shift;
317 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
318 assert (info->idx == 1); /* Vn */
319 aarch64_insn value = extract_field (FLD_imm4, code, 0);
320 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
321 info->qualifier = get_expected_qualifier (inst, info->idx);
322 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
323 info->reglane.index = value >> shift;
324 }
325 else
326 {
327 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
328 imm5<3:0> <V>
329 0000 RESERVED
330 xxx1 B
331 xx10 H
332 x100 S
333 1000 D */
334 int pos = -1;
335 aarch64_insn value = extract_field (FLD_imm5, code, 0);
336 while (++pos <= 3 && (value & 0x1) == 0)
337 value >>= 1;
338 if (pos > 3)
339 return FALSE;
340 info->qualifier = get_sreg_qualifier_from_value (pos);
341 info->reglane.index = (unsigned) (value >> 1);
342 }
343 }
344 else if (inst->opcode->iclass == dotproduct)
345 {
346 /* Need information in other operand(s) to help decoding. */
347 info->qualifier = get_expected_qualifier (inst, info->idx);
348 switch (info->qualifier)
349 {
350 case AARCH64_OPND_QLF_S_4B:
351 /* L:H */
352 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
353 info->reglane.regno &= 0x1f;
354 break;
355 default:
356 return FALSE;
357 }
358 }
359 else if (inst->opcode->iclass == cryptosm3)
360 {
361 /* Index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>.S[<imm2>]. */
362 info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
363 }
364 else
365 {
366 /* Index only for e.g.
367 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
368
369 /* Need information in other operand(s) to help decoding. */
370 info->qualifier = get_expected_qualifier (inst, info->idx);
371 switch (info->qualifier)
372 {
373 case AARCH64_OPND_QLF_S_H:
374 if (info->type == AARCH64_OPND_Em16)
375 {
376 /* h:l:m */
377 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
378 FLD_M);
379 info->reglane.regno &= 0xf;
380 }
381 else
382 {
383 /* h:l */
384 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
385 }
386 break;
387 case AARCH64_OPND_QLF_S_S:
388 /* h:l */
389 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
390 break;
391 case AARCH64_OPND_QLF_S_D:
392 /* H */
393 info->reglane.index = extract_field (FLD_H, code, 0);
394 break;
395 default:
396 return FALSE;
397 }
398
399 if (inst->opcode->op == OP_FCMLA_ELEM
400 && info->qualifier != AARCH64_OPND_QLF_S_H)
401 {
402 /* Complex operand takes two elements. */
403 if (info->reglane.index & 1)
404 return FALSE;
405 info->reglane.index /= 2;
406 }
407 }
408
409 return TRUE;
410 }
411
412 bfd_boolean
413 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
414 const aarch64_insn code,
415 const aarch64_inst *inst ATTRIBUTE_UNUSED,
416 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
417 {
418 /* R */
419 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
420 /* len */
421 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
422 return TRUE;
423 }
424
425 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
426 bfd_boolean
427 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
428 aarch64_opnd_info *info, const aarch64_insn code,
429 const aarch64_inst *inst,
430 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
431 {
432 aarch64_insn value;
433 /* Number of elements in each structure to be loaded/stored. */
434 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
435
436 struct
437 {
438 unsigned is_reserved;
439 unsigned num_regs;
440 unsigned num_elements;
441 } data [] =
442 { {0, 4, 4},
443 {1, 4, 4},
444 {0, 4, 1},
445 {0, 4, 2},
446 {0, 3, 3},
447 {1, 3, 3},
448 {0, 3, 1},
449 {0, 1, 1},
450 {0, 2, 2},
451 {1, 2, 2},
452 {0, 2, 1},
453 };
454
455 /* Rt */
456 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
457 /* opcode */
458 value = extract_field (FLD_opcode, code, 0);
459 /* PR 21595: Check for a bogus value. */
460 if (value >= ARRAY_SIZE (data))
461 return FALSE;
462 if (expected_num != data[value].num_elements || data[value].is_reserved)
463 return FALSE;
464 info->reglist.num_regs = data[value].num_regs;
465
466 return TRUE;
467 }
468
469 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
470 lanes instructions. */
471 bfd_boolean
472 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
473 aarch64_opnd_info *info, const aarch64_insn code,
474 const aarch64_inst *inst,
475 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
476 {
477 aarch64_insn value;
478
479 /* Rt */
480 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
481 /* S */
482 value = extract_field (FLD_S, code, 0);
483
484 /* Number of registers is equal to the number of elements in
485 each structure to be loaded/stored. */
486 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
487 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
488
489 /* Except when it is LD1R. */
490 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
491 info->reglist.num_regs = 2;
492
493 return TRUE;
494 }
495
496 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
497 load/store single element instructions. */
498 bfd_boolean
499 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
500 aarch64_opnd_info *info, const aarch64_insn code,
501 const aarch64_inst *inst ATTRIBUTE_UNUSED,
502 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
503 {
504 aarch64_field field = {0, 0};
505 aarch64_insn QSsize; /* fields Q:S:size. */
506 aarch64_insn opcodeh2; /* opcode<2:1> */
507
508 /* Rt */
509 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
510
511 /* Decode the index, opcode<2:1> and size. */
512 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
513 opcodeh2 = extract_field_2 (&field, code, 0);
514 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
515 switch (opcodeh2)
516 {
517 case 0x0:
518 info->qualifier = AARCH64_OPND_QLF_S_B;
519 /* Index encoded in "Q:S:size". */
520 info->reglist.index = QSsize;
521 break;
522 case 0x1:
523 if (QSsize & 0x1)
524 /* UND. */
525 return FALSE;
526 info->qualifier = AARCH64_OPND_QLF_S_H;
527 /* Index encoded in "Q:S:size<1>". */
528 info->reglist.index = QSsize >> 1;
529 break;
530 case 0x2:
531 if ((QSsize >> 1) & 0x1)
532 /* UND. */
533 return FALSE;
534 if ((QSsize & 0x1) == 0)
535 {
536 info->qualifier = AARCH64_OPND_QLF_S_S;
537 /* Index encoded in "Q:S". */
538 info->reglist.index = QSsize >> 2;
539 }
540 else
541 {
542 if (extract_field (FLD_S, code, 0))
543 /* UND */
544 return FALSE;
545 info->qualifier = AARCH64_OPND_QLF_S_D;
546 /* Index encoded in "Q". */
547 info->reglist.index = QSsize >> 3;
548 }
549 break;
550 default:
551 return FALSE;
552 }
553
554 info->reglist.has_index = 1;
555 info->reglist.num_regs = 0;
556 /* Number of registers is equal to the number of elements in
557 each structure to be loaded/stored. */
558 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
559 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
560
561 return TRUE;
562 }
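/* Illustrative sketch (not part of the original source): for a
   single-element LD1 {<Vt>.S}[<index>] the opcode<2:1> value is 0x2 and
   the index is Q:S (QSsize >> 2); e.g. Q=1, S=0, size=00 gives
   QSsize == 0b1000 and hence index 2, i.e. LD1 {Vt.S}[2].  */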
563
564 /* Decode fields immh:immb and/or Q for e.g.
565 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
566 or SSHR <V><d>, <V><n>, #<shift>. */
567
568 bfd_boolean
569 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
570 aarch64_opnd_info *info, const aarch64_insn code,
571 const aarch64_inst *inst,
572 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
573 {
574 int pos;
575 aarch64_insn Q, imm, immh;
576 enum aarch64_insn_class iclass = inst->opcode->iclass;
577
578 immh = extract_field (FLD_immh, code, 0);
579 if (immh == 0)
580 return FALSE;
581 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
582 pos = 4;
583 /* Get highest set bit in immh. */
584 while (--pos >= 0 && (immh & 0x8) == 0)
585 immh <<= 1;
586
587 assert ((iclass == asimdshf || iclass == asisdshf)
588 && (info->type == AARCH64_OPND_IMM_VLSR
589 || info->type == AARCH64_OPND_IMM_VLSL));
590
591 if (iclass == asimdshf)
592 {
593 Q = extract_field (FLD_Q, code, 0);
594 /* immh Q <T>
595 0000 x SEE AdvSIMD modified immediate
596 0001 0 8B
597 0001 1 16B
598 001x 0 4H
599 001x 1 8H
600 01xx 0 2S
601 01xx 1 4S
602 1xxx 0 RESERVED
603 1xxx 1 2D */
604 info->qualifier =
605 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
606 }
607 else
608 info->qualifier = get_sreg_qualifier_from_value (pos);
609
610 if (info->type == AARCH64_OPND_IMM_VLSR)
611 /* immh <shift>
612 0000 SEE AdvSIMD modified immediate
613 0001 (16-UInt(immh:immb))
614 001x (32-UInt(immh:immb))
615 01xx (64-UInt(immh:immb))
616 1xxx (128-UInt(immh:immb)) */
617 info->imm.value = (16 << pos) - imm;
618 else
619 /* immh:immb
620 immh <shift>
621 0000 SEE AdvSIMD modified immediate
622 0001 (UInt(immh:immb)-8)
623 001x (UInt(immh:immb)-16)
624 01xx (UInt(immh:immb)-32)
625 1xxx (UInt(immh:immb)-64) */
626 info->imm.value = imm - (8 << pos);
627
628 return TRUE;
629 }
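/* Illustrative sketch (not part of the original source): for a vector
   SSHR with immh = 0100, immb = 110 and Q = 1, the highest set bit of
   immh gives pos == 2, so the qualifier is 4S (see the table above) and
   the right-shift amount is (16 << 2) - 0b0100110 == 64 - 38 == 26,
   i.e. SSHR Vd.4S, Vn.4S, #26.  */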
630
631 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
632 bfd_boolean
633 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
634 aarch64_opnd_info *info, const aarch64_insn code,
635 const aarch64_inst *inst ATTRIBUTE_UNUSED,
636 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
637 {
638 int64_t imm;
639 aarch64_insn val;
640 val = extract_field (FLD_size, code, 0);
641 switch (val)
642 {
643 case 0: imm = 8; break;
644 case 1: imm = 16; break;
645 case 2: imm = 32; break;
646 default: return FALSE;
647 }
648 info->imm.value = imm;
649 return TRUE;
650 }
651
652 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
653 The value in the field(s) is extracted as an unsigned immediate value. */
654 bfd_boolean
655 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
656 const aarch64_insn code,
657 const aarch64_inst *inst ATTRIBUTE_UNUSED,
658 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
659 {
660 int64_t imm;
661
662 imm = extract_all_fields (self, code);
663
664 if (operand_need_sign_extension (self))
665 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
666
667 if (operand_need_shift_by_two (self))
668 imm <<= 2;
669 else if (operand_need_shift_by_four (self))
670 imm <<= 4;
671
672 if (info->type == AARCH64_OPND_ADDR_ADRP)
673 imm <<= 12;
674
675 info->imm.value = imm;
676 return TRUE;
677 }
678
679 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
680 bfd_boolean
681 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
682 const aarch64_insn code,
683 const aarch64_inst *inst ATTRIBUTE_UNUSED,
684 aarch64_operand_error *errors)
685 {
686 aarch64_ext_imm (self, info, code, inst, errors);
687 info->shifter.kind = AARCH64_MOD_LSL;
688 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
689 return TRUE;
690 }
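/* Illustrative sketch (not part of the original source): for
   MOVZ <Xd>, #<imm16>{, LSL #<shift>} the hw field selects the shift in
   16-bit steps, so hw = 2 decodes as shifter.amount == 32, i.e.
   MOVZ Xd, #0x1234, LSL #32 when imm16 == 0x1234.  */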
691
692 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
693 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
694 bfd_boolean
695 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
696 aarch64_opnd_info *info,
697 const aarch64_insn code,
698 const aarch64_inst *inst ATTRIBUTE_UNUSED,
699 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
700 {
701 uint64_t imm;
702 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
703 aarch64_field field = {0, 0};
704
705 assert (info->idx == 1);
706
707 if (info->type == AARCH64_OPND_SIMD_FPIMM)
708 info->imm.is_fp = 1;
709
710 /* a:b:c:d:e:f:g:h */
711 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
712 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
713 {
714 /* Either MOVI <Dd>, #<imm>
715 or MOVI <Vd>.2D, #<imm>.
716 <imm> is a 64-bit immediate
717 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
718 encoded in "a:b:c:d:e:f:g:h". */
719 int i;
720 unsigned abcdefgh = imm;
721 for (imm = 0ull, i = 0; i < 8; i++)
722 if (((abcdefgh >> i) & 0x1) != 0)
723 imm |= 0xffull << (8 * i);
724 }
725 info->imm.value = imm;
726
727 /* cmode */
728 info->qualifier = get_expected_qualifier (inst, info->idx);
729 switch (info->qualifier)
730 {
731 case AARCH64_OPND_QLF_NIL:
732 /* no shift */
733 info->shifter.kind = AARCH64_MOD_NONE;
734 return 1;
735 case AARCH64_OPND_QLF_LSL:
736 /* shift zeros */
737 info->shifter.kind = AARCH64_MOD_LSL;
738 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
739 {
740 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
741 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
742 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
743 default: assert (0); return FALSE;
744 }
745 /* 00: 0; 01: 8; 10:16; 11:24. */
746 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
747 break;
748 case AARCH64_OPND_QLF_MSL:
749 /* shift ones */
750 info->shifter.kind = AARCH64_MOD_MSL;
751 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
752 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
753 break;
754 default:
755 assert (0);
756 return FALSE;
757 }
758
759 return TRUE;
760 }
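/* Illustrative sketch (not part of the original source): for
   MOVI <Vd>.2D, #<imm> each set bit of a:b:c:d:e:f:g:h expands to a
   0xff byte, so abcdefgh == 0xa5 (10100101) decodes to the 64-bit
   immediate 0xff00ff0000ff00ff.  */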
761
762 /* Decode an 8-bit floating-point immediate. */
763 bfd_boolean
764 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
765 const aarch64_insn code,
766 const aarch64_inst *inst ATTRIBUTE_UNUSED,
767 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
768 {
769 info->imm.value = extract_all_fields (self, code);
770 info->imm.is_fp = 1;
771 return TRUE;
772 }
773
774 /* Decode a 1-bit rotate immediate (#90 or #270). */
775 bfd_boolean
776 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
777 const aarch64_insn code,
778 const aarch64_inst *inst ATTRIBUTE_UNUSED,
779 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
780 {
781 uint64_t rot = extract_field (self->fields[0], code, 0);
782 assert (rot < 2U);
783 info->imm.value = rot * 180 + 90;
784 return TRUE;
785 }
786
787 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
788 bfd_boolean
789 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
790 const aarch64_insn code,
791 const aarch64_inst *inst ATTRIBUTE_UNUSED,
792 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
793 {
794 uint64_t rot = extract_field (self->fields[0], code, 0);
795 assert (rot < 4U);
796 info->imm.value = rot * 90;
797 return TRUE;
798 }
799
800 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
801 bfd_boolean
802 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
803 aarch64_opnd_info *info, const aarch64_insn code,
804 const aarch64_inst *inst ATTRIBUTE_UNUSED,
805 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
806 {
807 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
808 return TRUE;
809 }
810
811 /* Decode arithmetic immediate for e.g.
812 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
813 bfd_boolean
814 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
815 aarch64_opnd_info *info, const aarch64_insn code,
816 const aarch64_inst *inst ATTRIBUTE_UNUSED,
817 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
818 {
819 aarch64_insn value;
820
821 info->shifter.kind = AARCH64_MOD_LSL;
822 /* shift */
823 value = extract_field (FLD_shift, code, 0);
824 if (value >= 2)
825 return FALSE;
826 info->shifter.amount = value ? 12 : 0;
827 /* imm12 (unsigned) */
828 info->imm.value = extract_field (FLD_imm12, code, 0);
829
830 return TRUE;
831 }
832
833 /* Return true if VALUE is a valid logical immediate encoding, storing the
834 decoded value in *RESULT if so. ESIZE is the number of bytes in the
835 decoded immediate. */
836 static bfd_boolean
837 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
838 {
839 uint64_t imm, mask;
840 uint32_t N, R, S;
841 unsigned simd_size;
842
843 /* value is N:immr:imms. */
844 S = value & 0x3f;
845 R = (value >> 6) & 0x3f;
846 N = (value >> 12) & 0x1;
847
848 The immediate value consists of S+1 bits set to 1, left rotated by
849 SIMDsize - R (in other words, right rotated by R), then replicated. */
850 if (N != 0)
851 {
852 simd_size = 64;
853 mask = 0xffffffffffffffffull;
854 }
855 else
856 {
857 switch (S)
858 {
859 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
860 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
861 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
862 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
863 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
864 default: return FALSE;
865 }
866 mask = (1ull << simd_size) - 1;
867 /* Top bits are IGNORED. */
868 R &= simd_size - 1;
869 }
870
871 if (simd_size > esize * 8)
872 return FALSE;
873
874 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
875 if (S == simd_size - 1)
876 return FALSE;
877 /* S+1 consecutive bits to 1. */
878 /* NOTE: S can't be 63 due to detection above. */
879 imm = (1ull << (S + 1)) - 1;
880 /* Rotate to the left by simd_size - R. */
881 if (R != 0)
882 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
883 /* Replicate the value according to SIMD size. */
884 switch (simd_size)
885 {
886 case 2: imm = (imm << 2) | imm;
887 /* Fall through. */
888 case 4: imm = (imm << 4) | imm;
889 /* Fall through. */
890 case 8: imm = (imm << 8) | imm;
891 /* Fall through. */
892 case 16: imm = (imm << 16) | imm;
893 /* Fall through. */
894 case 32: imm = (imm << 32) | imm;
895 /* Fall through. */
896 case 64: break;
897 default: assert (0); return 0;
898 }
899
900 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
901
902 return TRUE;
903 }
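/* Illustrative sketch (not part of the original source): for an X-register
   ORR with N = 0, immr = 0, imms = 0b111100, the element size is 2 bits
   with S+1 == 1 bit set, so the element 0b01 is replicated up to 64 bits,
   giving #0x5555555555555555.  The final masking above keeps only the low
   esize*8 bits; the shift is split into two esize*4 steps so that
   esize == 8 does not produce an undefined shift by 64.  */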
904
905 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
906 bfd_boolean
907 aarch64_ext_limm (const aarch64_operand *self,
908 aarch64_opnd_info *info, const aarch64_insn code,
909 const aarch64_inst *inst,
910 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
911 {
912 uint32_t esize;
913 aarch64_insn value;
914
915 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
916 self->fields[2]);
917 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
918 return decode_limm (esize, value, &info->imm.value);
919 }
920
921 /* Decode a logical immediate for the BIC alias of AND (etc.). */
922 bfd_boolean
923 aarch64_ext_inv_limm (const aarch64_operand *self,
924 aarch64_opnd_info *info, const aarch64_insn code,
925 const aarch64_inst *inst,
926 aarch64_operand_error *errors)
927 {
928 if (!aarch64_ext_limm (self, info, code, inst, errors))
929 return FALSE;
930 info->imm.value = ~info->imm.value;
931 return TRUE;
932 }
933
934 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
935 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
936 bfd_boolean
937 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
938 aarch64_opnd_info *info,
939 const aarch64_insn code, const aarch64_inst *inst,
940 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
941 {
942 aarch64_insn value;
943
944 /* Rt */
945 info->reg.regno = extract_field (FLD_Rt, code, 0);
946
947 /* size */
948 value = extract_field (FLD_ldst_size, code, 0);
949 if (inst->opcode->iclass == ldstpair_indexed
950 || inst->opcode->iclass == ldstnapair_offs
951 || inst->opcode->iclass == ldstpair_off
952 || inst->opcode->iclass == loadlit)
953 {
954 enum aarch64_opnd_qualifier qualifier;
955 switch (value)
956 {
957 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
958 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
959 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
960 default: return FALSE;
961 }
962 info->qualifier = qualifier;
963 }
964 else
965 {
966 /* opc1:size */
967 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
968 if (value > 0x4)
969 return FALSE;
970 info->qualifier = get_sreg_qualifier_from_value (value);
971 }
972
973 return TRUE;
974 }
975
976 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
977 bfd_boolean
978 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
979 aarch64_opnd_info *info,
980 aarch64_insn code,
981 const aarch64_inst *inst ATTRIBUTE_UNUSED,
982 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
983 {
984 /* Rn */
985 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
986 return TRUE;
987 }
988
989 /* Decode the address operand for e.g.
990 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
991 bfd_boolean
992 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
993 aarch64_opnd_info *info,
994 aarch64_insn code, const aarch64_inst *inst,
995 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
996 {
997 info->qualifier = get_expected_qualifier (inst, info->idx);
998
999 /* Rn */
1000 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1001
1002 /* simm9 */
1003 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1004 info->addr.offset.imm = sign_extend (imm, 8);
1005 if (extract_field (self->fields[2], code, 0) == 1) {
1006 info->addr.writeback = 1;
1007 info->addr.preind = 1;
1008 }
1009 return TRUE;
1010 }
1011
1012 /* Decode the address operand for e.g.
1013 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1014 bfd_boolean
1015 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
1016 aarch64_opnd_info *info,
1017 aarch64_insn code, const aarch64_inst *inst,
1018 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1019 {
1020 aarch64_insn S, value;
1021
1022 /* Rn */
1023 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1024 /* Rm */
1025 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1026 /* option */
1027 value = extract_field (FLD_option, code, 0);
1028 info->shifter.kind =
1029 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1030 /* Fix-up the shifter kind; although the table-driven approach is
1031 efficient, it is slightly inflexible, thus needing this fix-up. */
1032 if (info->shifter.kind == AARCH64_MOD_UXTX)
1033 info->shifter.kind = AARCH64_MOD_LSL;
1034 /* S */
1035 S = extract_field (FLD_S, code, 0);
1036 if (S == 0)
1037 {
1038 info->shifter.amount = 0;
1039 info->shifter.amount_present = 0;
1040 }
1041 else
1042 {
1043 int size;
1044 /* Need information in other operand(s) to help achieve the decoding
1045 from 'S' field. */
1046 info->qualifier = get_expected_qualifier (inst, info->idx);
1047 /* Get the size of the data element that is accessed, which may be
1048 different from that of the source register size, e.g. in strb/ldrb. */
1049 size = aarch64_get_qualifier_esize (info->qualifier);
1050 info->shifter.amount = get_logsz (size);
1051 info->shifter.amount_present = 1;
1052 }
1053
1054 return TRUE;
1055 }
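/* Illustrative sketch (not part of the original source): for
   LDR <Qt>, [<Xn|SP>, <Xm>, LSL #<amount>] with S == 1, the accessed
   element is 16 bytes wide, so the code above sets
   shifter.amount == get_logsz (16) == 4, i.e. LSL #4.  */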
1056
1057 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
1058 bfd_boolean
1059 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1060 aarch64_insn code, const aarch64_inst *inst,
1061 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1062 {
1063 aarch64_insn imm;
1064 info->qualifier = get_expected_qualifier (inst, info->idx);
1065
1066 /* Rn */
1067 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1068 /* simm (imm9 or imm7) */
1069 imm = extract_field (self->fields[0], code, 0);
1070 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1071 if (self->fields[0] == FLD_imm7
1072 || info->qualifier == AARCH64_OPND_QLF_imm_tag)
1073 /* scaled immediate in ld/st pair instructions. */
1074 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1075 /* qualifier */
1076 if (inst->opcode->iclass == ldst_unscaled
1077 || inst->opcode->iclass == ldstnapair_offs
1078 || inst->opcode->iclass == ldstpair_off
1079 || inst->opcode->iclass == ldst_unpriv)
1080 info->addr.writeback = 0;
1081 else
1082 {
1083 /* pre/post- index */
1084 info->addr.writeback = 1;
1085 if (extract_field (self->fields[1], code, 0) == 1)
1086 info->addr.preind = 1;
1087 else
1088 info->addr.postind = 1;
1089 }
1090
1091 return TRUE;
1092 }
1093
1094 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1095 bfd_boolean
1096 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1097 aarch64_insn code,
1098 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1099 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1100 {
1101 int shift;
1102 info->qualifier = get_expected_qualifier (inst, info->idx);
1103 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1104 /* Rn */
1105 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1106 /* uimm12 */
1107 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1108 return TRUE;
1109 }
1110
1111 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1112 bfd_boolean
1113 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1114 aarch64_insn code,
1115 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1116 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1117 {
1118 aarch64_insn imm;
1119
1120 info->qualifier = get_expected_qualifier (inst, info->idx);
1121 /* Rn */
1122 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1123 /* simm10 */
1124 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1125 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1126 if (extract_field (self->fields[3], code, 0) == 1) {
1127 info->addr.writeback = 1;
1128 info->addr.preind = 1;
1129 }
1130 return TRUE;
1131 }
1132
1133 /* Decode the address operand for e.g.
1134 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1135 bfd_boolean
1136 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1137 aarch64_opnd_info *info,
1138 aarch64_insn code, const aarch64_inst *inst,
1139 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1140 {
1141 /* The opcode dependent area stores the number of elements in
1142 each structure to be loaded/stored. */
1143 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1144
1145 /* Rn */
1146 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1147 /* Rm | #<amount> */
1148 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1149 if (info->addr.offset.regno == 31)
1150 {
1151 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1152 /* Special handling of loading single structure to all lanes. */
1153 info->addr.offset.imm = (is_ld1r ? 1
1154 : inst->operands[0].reglist.num_regs)
1155 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1156 else
1157 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1158 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1159 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1160 }
1161 else
1162 info->addr.offset.is_reg = 1;
1163 info->addr.writeback = 1;
1164
1165 return TRUE;
1166 }
1167
1168 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1169 bfd_boolean
1170 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1171 aarch64_opnd_info *info,
1172 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1173 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1174 {
1175 aarch64_insn value;
1176 /* cond */
1177 value = extract_field (FLD_cond, code, 0);
1178 info->cond = get_cond_from_value (value);
1179 return TRUE;
1180 }
1181
1182 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1183 bfd_boolean
1184 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1185 aarch64_opnd_info *info,
1186 aarch64_insn code,
1187 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1188 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1189 {
1190 /* op0:op1:CRn:CRm:op2 */
1191 info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1192 FLD_CRm, FLD_op2);
1193 info->sysreg.flags = 0;
1194
1195 /* If a system instruction, check which restrictions should be placed on
1196 the register value during decoding; these will be enforced then. */
1197 if (inst->opcode->iclass == ic_system)
1198 {
1199 /* Check to see if it's read-only; else check if it's write-only.
1200 If it's both or unspecified, don't care. */
1201 if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1202 info->sysreg.flags = F_REG_READ;
1203 else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1204 == F_SYS_WRITE)
1205 info->sysreg.flags = F_REG_WRITE;
1206 }
1207
1208 return TRUE;
1209 }
1210
1211 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1212 bfd_boolean
1213 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1214 aarch64_opnd_info *info, aarch64_insn code,
1215 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1216 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1217 {
1218 int i;
1219 /* op1:op2 */
1220 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1221 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1222 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1223 return TRUE;
1224 /* Reserved value in <pstatefield>. */
1225 return FALSE;
1226 }
1227
1228 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1229 bfd_boolean
1230 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1231 aarch64_opnd_info *info,
1232 aarch64_insn code,
1233 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1234 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1235 {
1236 int i;
1237 aarch64_insn value;
1238 const aarch64_sys_ins_reg *sysins_ops;
1239 /* op0:op1:CRn:CRm:op2 */
1240 value = extract_fields (code, 0, 5,
1241 FLD_op0, FLD_op1, FLD_CRn,
1242 FLD_CRm, FLD_op2);
1243
1244 switch (info->type)
1245 {
1246 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1247 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1248 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1249 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1250 case AARCH64_OPND_SYSREG_SR:
1251 sysins_ops = aarch64_sys_regs_sr;
1252 /* Let's remove op2 for rctx. Refer to comments in the definition of
1253 aarch64_sys_regs_sr[]. */
1254 value = value & ~(0x7);
1255 break;
1256 default: assert (0); return FALSE;
1257 }
1258
1259 for (i = 0; sysins_ops[i].name != NULL; ++i)
1260 if (sysins_ops[i].value == value)
1261 {
1262 info->sysins_op = sysins_ops + i;
1263 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1264 info->sysins_op->name,
1265 (unsigned)info->sysins_op->value,
1266 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1267 return TRUE;
1268 }
1269
1270 return FALSE;
1271 }
1272
1273 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1274
1275 bfd_boolean
1276 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1277 aarch64_opnd_info *info,
1278 aarch64_insn code,
1279 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1280 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1281 {
1282 /* CRm */
1283 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1284 return TRUE;
1285 }
1286
1287 /* Decode the prefetch operation option operand for e.g.
1288 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1289
1290 bfd_boolean
1291 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1292 aarch64_opnd_info *info,
1293 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1294 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1295 {
1296 /* prfop in Rt */
1297 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1298 return TRUE;
1299 }
1300
1301 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1302 to the matching name/value pair in aarch64_hint_options. */
1303
1304 bfd_boolean
1305 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1306 aarch64_opnd_info *info,
1307 aarch64_insn code,
1308 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1309 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1310 {
1311 /* CRm:op2. */
1312 unsigned hint_number;
1313 int i;
1314
1315 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1316
1317 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1318 {
1319 if (hint_number == HINT_VAL (aarch64_hint_options[i].value))
1320 {
1321 info->hint_option = &(aarch64_hint_options[i]);
1322 return TRUE;
1323 }
1324 }
1325
1326 return FALSE;
1327 }
1328
1329 /* Decode the extended register operand for e.g.
1330 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1331 bfd_boolean
1332 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1333 aarch64_opnd_info *info,
1334 aarch64_insn code,
1335 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1336 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1337 {
1338 aarch64_insn value;
1339
1340 /* Rm */
1341 info->reg.regno = extract_field (FLD_Rm, code, 0);
1342 /* option */
1343 value = extract_field (FLD_option, code, 0);
1344 info->shifter.kind =
1345 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1346 /* imm3 */
1347 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1348
1349 /* This makes the constraint checking happy. */
1350 info->shifter.operator_present = 1;
1351
1352 /* Assume inst->operands[0].qualifier has been resolved. */
1353 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1354 info->qualifier = AARCH64_OPND_QLF_W;
1355 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1356 && (info->shifter.kind == AARCH64_MOD_UXTX
1357 || info->shifter.kind == AARCH64_MOD_SXTX))
1358 info->qualifier = AARCH64_OPND_QLF_X;
1359
1360 return TRUE;
1361 }
1362
1363 /* Decode the shifted register operand for e.g.
1364 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1365 bfd_boolean
1366 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1367 aarch64_opnd_info *info,
1368 aarch64_insn code,
1369 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1370 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1371 {
1372 aarch64_insn value;
1373
1374 /* Rm */
1375 info->reg.regno = extract_field (FLD_Rm, code, 0);
1376 /* shift */
1377 value = extract_field (FLD_shift, code, 0);
1378 info->shifter.kind =
1379 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1380 if (info->shifter.kind == AARCH64_MOD_ROR
1381 && inst->opcode->iclass != log_shift)
1382 /* ROR is not available for the shifted register operand in arithmetic
1383 instructions. */
1384 return FALSE;
1385 /* imm6 */
1386 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1387
1388 /* This makes the constraint checking happy. */
1389 info->shifter.operator_present = 1;
1390
1391 return TRUE;
1392 }
1393
1394 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1395 where <offset> is given by the OFFSET parameter and where <factor> is
1396 1 plus SELF's operand-dependent value. fields[0] specifies the field
1397 that holds <base>. */
1398 static bfd_boolean
1399 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1400 aarch64_opnd_info *info, aarch64_insn code,
1401 int64_t offset)
1402 {
1403 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1404 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1405 info->addr.offset.is_reg = FALSE;
1406 info->addr.writeback = FALSE;
1407 info->addr.preind = TRUE;
1408 if (offset != 0)
1409 info->shifter.kind = AARCH64_MOD_MUL_VL;
1410 info->shifter.amount = 1;
1411 info->shifter.operator_present = (info->addr.offset.imm != 0);
1412 info->shifter.amount_present = FALSE;
1413 return TRUE;
1414 }
1415
1416 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1417 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1418 SELF's operand-dependent value. fields[0] specifies the field that
1419 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1420 bfd_boolean
1421 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1422 aarch64_opnd_info *info, aarch64_insn code,
1423 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1424 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1425 {
1426 int offset;
1427
1428 offset = extract_field (FLD_SVE_imm4, code, 0);
1429 offset = ((offset + 8) & 15) - 8;
1430 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1431 }
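/* Illustrative note (not part of the original source): the
   ((offset + 8) & 15) - 8 idiom above folds the raw 4-bit field into the
   signed range [-8, 7]; e.g. a raw value of 15 becomes -1, so the operand
   prints as [<Xn|SP>, #-1, MUL VL].  The s6xvl and s9xvl variants below
   use the same wrap with 6- and 9-bit widths.  */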
1432
1433 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1434 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1435 SELF's operand-dependent value. fields[0] specifies the field that
1436 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1437 bfd_boolean
1438 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1439 aarch64_opnd_info *info, aarch64_insn code,
1440 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1441 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1442 {
1443 int offset;
1444
1445 offset = extract_field (FLD_SVE_imm6, code, 0);
1446 offset = (((offset + 32) & 63) - 32);
1447 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1448 }
1449
1450 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1451 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1452 SELF's operand-dependent value. fields[0] specifies the field that
1453 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1454 and imm3 fields, with imm3 being the less-significant part. */
1455 bfd_boolean
1456 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1457 aarch64_opnd_info *info,
1458 aarch64_insn code,
1459 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1460 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1461 {
1462 int offset;
1463
1464 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1465 offset = (((offset + 256) & 511) - 256);
1466 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1467 }
1468
1469 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1470 is given by the OFFSET parameter and where <shift> is SELF's operand-
1471 dependent value. fields[0] specifies the base register field <base>. */
1472 static bfd_boolean
1473 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1474 aarch64_opnd_info *info, aarch64_insn code,
1475 int64_t offset)
1476 {
1477 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1478 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1479 info->addr.offset.is_reg = FALSE;
1480 info->addr.writeback = FALSE;
1481 info->addr.preind = TRUE;
1482 info->shifter.operator_present = FALSE;
1483 info->shifter.amount_present = FALSE;
1484 return TRUE;
1485 }
1486
1487 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1488 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1489 value. fields[0] specifies the base register field. */
1490 bfd_boolean
1491 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1492 aarch64_opnd_info *info, aarch64_insn code,
1493 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1494 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1495 {
1496 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1497 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1498 }
1499
1500 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1501 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1502 value. fields[0] specifies the base register field. */
1503 bfd_boolean
1504 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1505 aarch64_opnd_info *info, aarch64_insn code,
1506 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1507 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1508 {
1509 int offset = extract_field (FLD_SVE_imm6, code, 0);
1510 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1511 }
1512
1513 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1514 is SELF's operand-dependent value. fields[0] specifies the base
1515 register field and fields[1] specifies the offset register field. */
1516 bfd_boolean
1517 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1518 aarch64_opnd_info *info, aarch64_insn code,
1519 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1520 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1521 {
1522 int index_regno;
1523
1524 index_regno = extract_field (self->fields[1], code, 0);
1525 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1526 return FALSE;
1527
1528 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1529 info->addr.offset.regno = index_regno;
1530 info->addr.offset.is_reg = TRUE;
1531 info->addr.writeback = FALSE;
1532 info->addr.preind = TRUE;
1533 info->shifter.kind = AARCH64_MOD_LSL;
1534 info->shifter.amount = get_operand_specific_data (self);
1535 info->shifter.operator_present = (info->shifter.amount != 0);
1536 info->shifter.amount_present = (info->shifter.amount != 0);
1537 return TRUE;
1538 }
1539
1540 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1541 <shift> is SELF's operand-dependent value. fields[0] specifies the
1542 base register field, fields[1] specifies the offset register field and
1543 fields[2] is a single-bit field that selects SXTW over UXTW. */
1544 bfd_boolean
1545 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1546 aarch64_opnd_info *info, aarch64_insn code,
1547 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1548 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1549 {
1550 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1551 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1552 info->addr.offset.is_reg = TRUE;
1553 info->addr.writeback = FALSE;
1554 info->addr.preind = TRUE;
1555 if (extract_field (self->fields[2], code, 0))
1556 info->shifter.kind = AARCH64_MOD_SXTW;
1557 else
1558 info->shifter.kind = AARCH64_MOD_UXTW;
1559 info->shifter.amount = get_operand_specific_data (self);
1560 info->shifter.operator_present = TRUE;
1561 info->shifter.amount_present = (info->shifter.amount != 0);
1562 return TRUE;
1563 }
1564
1565 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1566 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1567 fields[0] specifies the base register field. */
1568 bfd_boolean
1569 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1570 aarch64_opnd_info *info, aarch64_insn code,
1571 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1572 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1573 {
1574 int offset = extract_field (FLD_imm5, code, 0);
1575 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1576 }
1577
1578 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1579 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1580 number. fields[0] specifies the base register field and fields[1]
1581 specifies the offset register field. */
1582 static bfd_boolean
1583 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1584 aarch64_insn code, enum aarch64_modifier_kind kind)
1585 {
1586 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1587 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1588 info->addr.offset.is_reg = TRUE;
1589 info->addr.writeback = FALSE;
1590 info->addr.preind = TRUE;
1591 info->shifter.kind = kind;
1592 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1593 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1594 || info->shifter.amount != 0);
1595 info->shifter.amount_present = (info->shifter.amount != 0);
1596 return TRUE;
1597 }
1598
1599 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1600 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1601 field and fields[1] specifies the offset register field. */
1602 bfd_boolean
1603 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1604 aarch64_opnd_info *info, aarch64_insn code,
1605 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1606 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1607 {
1608 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1609 }
1610
1611 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1612 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1613 field and fields[1] specifies the offset register field. */
1614 bfd_boolean
1615 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1616 aarch64_opnd_info *info, aarch64_insn code,
1617 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1618 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1619 {
1620 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1621 }
1622
1623 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1624 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1625 field and fields[1] specifies the offset register field. */
1626 bfd_boolean
1627 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1628 aarch64_opnd_info *info, aarch64_insn code,
1629 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1630 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1631 {
1632 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1633 }
1634
1635 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1636 has the raw field value and that the low 8 bits decode to VALUE. */
1637 static bfd_boolean
1638 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1639 {
1640 info->shifter.kind = AARCH64_MOD_LSL;
1641 info->shifter.amount = 0;
1642 if (info->imm.value & 0x100)
1643 {
1644 if (value == 0)
1645 /* Decode 0x100 as #0, LSL #8. */
1646 info->shifter.amount = 8;
1647 else
1648 value *= 256;
1649 }
1650 info->shifter.operator_present = (info->shifter.amount != 0);
1651 info->shifter.amount_present = (info->shifter.amount != 0);
1652 info->imm.value = value;
1653 return TRUE;
1654 }
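/* Illustrative sketch (not part of the original source): the raw 9-bit
   value has the shift bit above the 8-bit immediate, so 0x100 decodes as
   "#0, LSL #8", while 0x1ff decodes for the unsigned ADD form as
   255 * 256 == #65280 (the shift is folded into the value).  */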
1655
1656 /* Decode an SVE ADD/SUB immediate. */
1657 bfd_boolean
1658 aarch64_ext_sve_aimm (const aarch64_operand *self,
1659 aarch64_opnd_info *info, const aarch64_insn code,
1660 const aarch64_inst *inst,
1661 aarch64_operand_error *errors)
1662 {
1663 return (aarch64_ext_imm (self, info, code, inst, errors)
1664 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1665 }
1666
1667 /* Decode an SVE CPY/DUP immediate. */
1668 bfd_boolean
1669 aarch64_ext_sve_asimm (const aarch64_operand *self,
1670 aarch64_opnd_info *info, const aarch64_insn code,
1671 const aarch64_inst *inst,
1672 aarch64_operand_error *errors)
1673 {
1674 return (aarch64_ext_imm (self, info, code, inst, errors)
1675 && decode_sve_aimm (info, (int8_t) info->imm.value));
1676 }
1677
1678 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1679 The fields array specifies which field to use. */
1680 bfd_boolean
1681 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1682 aarch64_opnd_info *info, aarch64_insn code,
1683 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1684 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1685 {
1686 if (extract_field (self->fields[0], code, 0))
1687 info->imm.value = 0x3f800000;
1688 else
1689 info->imm.value = 0x3f000000;
1690 info->imm.is_fp = TRUE;
1691 return TRUE;
1692 }
1693
1694 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1695 The fields array specifies which field to use. */
1696 bfd_boolean
1697 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1698 aarch64_opnd_info *info, aarch64_insn code,
1699 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1700 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1701 {
1702 if (extract_field (self->fields[0], code, 0))
1703 info->imm.value = 0x40000000;
1704 else
1705 info->imm.value = 0x3f000000;
1706 info->imm.is_fp = TRUE;
1707 return TRUE;
1708 }
1709
1710 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1711 The fields array specifies which field to use. */
1712 bfd_boolean
1713 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1714 aarch64_opnd_info *info, aarch64_insn code,
1715 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1716 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1717 {
1718 if (extract_field (self->fields[0], code, 0))
1719 info->imm.value = 0x3f800000;
1720 else
1721 info->imm.value = 0x0;
1722 info->imm.is_fp = TRUE;
1723 return TRUE;
1724 }
1725
1726 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1727 array specifies which field to use for Zn. MM is encoded in the
1728 concatenation of imm5 and SVE_tszh, with imm5 being the less
1729 significant part. */
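      /* For example (illustrative): tszh:imm5 == 0b0101100 has two trailing
         zero bits, which selects the S element size, and the remaining bits
         give the index 0b0101, i.e. Zn.S[5].  */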
1730 bfd_boolean
1731 aarch64_ext_sve_index (const aarch64_operand *self,
1732 aarch64_opnd_info *info, aarch64_insn code,
1733 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1734 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1735 {
1736 int val;
1737
1738 info->reglane.regno = extract_field (self->fields[0], code, 0);
1739 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1740 if ((val & 31) == 0)
1741     return FALSE;
1742 while ((val & 1) == 0)
1743 val /= 2;
1744 info->reglane.index = val / 2;
1745 return TRUE;
1746 }
1747
1748 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1749 bfd_boolean
1750 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1751 aarch64_opnd_info *info, const aarch64_insn code,
1752 const aarch64_inst *inst,
1753 aarch64_operand_error *errors)
1754 {
1755 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1756 return (aarch64_ext_limm (self, info, code, inst, errors)
1757 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1758 }
1759
1760 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1761 and where MM occupies the most-significant part. The operand-dependent
1762 value specifies the number of bits in Zn. */
1763 bfd_boolean
1764 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1765 aarch64_opnd_info *info, aarch64_insn code,
1766 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1767 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1768 {
1769 unsigned int reg_bits = get_operand_specific_data (self);
1770 unsigned int val = extract_all_fields (self, code);
1771 info->reglane.regno = val & ((1 << reg_bits) - 1);
1772 info->reglane.index = val >> reg_bits;
1773 return TRUE;
1774 }
1775
1776 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1777 to use for Zn. The opcode-dependent value specifies the number
1778 of registers in the list. */
1779 bfd_boolean
1780 aarch64_ext_sve_reglist (const aarch64_operand *self,
1781 aarch64_opnd_info *info, aarch64_insn code,
1782 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1783 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1784 {
1785 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1786 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1787 return TRUE;
1788 }
1789
1790 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1791 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1792 field. */
1793 bfd_boolean
1794 aarch64_ext_sve_scale (const aarch64_operand *self,
1795 aarch64_opnd_info *info, aarch64_insn code,
1796 const aarch64_inst *inst, aarch64_operand_error *errors)
1797 {
1798 int val;
1799
1800 if (!aarch64_ext_imm (self, info, code, inst, errors))
1801 return FALSE;
1802 val = extract_field (FLD_SVE_imm4, code, 0);
1803 info->shifter.kind = AARCH64_MOD_MUL;
1804 info->shifter.amount = val + 1;
1805 info->shifter.operator_present = (val != 0);
1806 info->shifter.amount_present = (val != 0);
1807 return TRUE;
1808 }
1809
1810 /* Return the top set bit in VALUE, which is expected to be relatively
1811 small. */
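      /* E.g. get_top_bit (0b10110) == 0b10000.  The SVE shift decoders below
         rely on this: for byte elements the encoded tszh:imm value 0b01011
         gives a left-shift amount of 11 - 8 == 3 and a right-shift amount of
         2 * 8 - 11 == 5.  */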
1812 static uint64_t
1813 get_top_bit (uint64_t value)
1814 {
1815 while ((value & -value) != value)
1816 value -= value & -value;
1817 return value;
1818 }
1819
1820 /* Decode an SVE shift-left immediate. */
1821 bfd_boolean
1822 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1823 aarch64_opnd_info *info, const aarch64_insn code,
1824 const aarch64_inst *inst, aarch64_operand_error *errors)
1825 {
1826 if (!aarch64_ext_imm (self, info, code, inst, errors)
1827 || info->imm.value == 0)
1828 return FALSE;
1829
1830 info->imm.value -= get_top_bit (info->imm.value);
1831 return TRUE;
1832 }
1833
1834 /* Decode an SVE shift-right immediate. */
1835 bfd_boolean
1836 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1837 aarch64_opnd_info *info, const aarch64_insn code,
1838 const aarch64_inst *inst, aarch64_operand_error *errors)
1839 {
1840 if (!aarch64_ext_imm (self, info, code, inst, errors)
1841 || info->imm.value == 0)
1842 return FALSE;
1843
1844 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1845 return TRUE;
1846 }
1847 \f
1848 /* Bitfields that are commonly used to encode certain operands' information
1849 may be partially used as part of the base opcode in some instructions.
1850    For example, bit 1 of the field 'size' in
1851 FCVTXN <Vb><d>, <Va><n>
1852 is actually part of the base opcode, while only size<0> is available
1853 for encoding the register type. Another example is the AdvSIMD
1854 instruction ORR (register), in which the field 'size' is also used for
1855 the base opcode, leaving only the field 'Q' available to encode the
1856 vector register arrangement specifier '8B' or '16B'.
1857
1858 This function tries to deduce the qualifier from the value of partially
1859 constrained field(s). Given the VALUE of such a field or fields, the
1860 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1861 operand encoding), the function returns the matching qualifier or
1862 AARCH64_OPND_QLF_NIL if nothing matches.
1863
1864 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1865 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1866 may end with AARCH64_OPND_QLF_NIL. */
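      /* For the ORR (register) example above, MASK covers only the Q bit, so a
         candidate list of { 8B, 16B } matches 8B when Q is clear and 16B when
         Q is set; the two standard values differ only in that bit.  */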
1867
1868 static enum aarch64_opnd_qualifier
1869 get_qualifier_from_partial_encoding (aarch64_insn value,
1870 const enum aarch64_opnd_qualifier* \
1871 candidates,
1872 aarch64_insn mask)
1873 {
1874 int i;
1875 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1876 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1877 {
1878 aarch64_insn standard_value;
1879 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1880 break;
1881 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1882 if ((standard_value & mask) == (value & mask))
1883 return candidates[i];
1884 }
1885 return AARCH64_OPND_QLF_NIL;
1886 }
1887
1888 /* Given a list of qualifier sequences, return all possible valid qualifiers
1889 for operand IDX in QUALIFIERS.
1890 Assume QUALIFIERS is an array whose length is large enough. */
1891
1892 static void
1893 get_operand_possible_qualifiers (int idx,
1894 const aarch64_opnd_qualifier_seq_t *list,
1895 enum aarch64_opnd_qualifier *qualifiers)
1896 {
1897 int i;
1898 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1899 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1900 break;
1901 }
1902
1903 /* Decode the size:Q fields for e.g. SHADD.
1904    We tag one operand with the qualifier according to the code;
1905    whether the qualifier is valid for this opcode or not is left
1906    to the semantic checking.  */
1907
1908 static int
1909 decode_sizeq (aarch64_inst *inst)
1910 {
1911 int idx;
1912 enum aarch64_opnd_qualifier qualifier;
1913 aarch64_insn code;
1914 aarch64_insn value, mask;
1915 enum aarch64_field_kind fld_sz;
1916 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1917
1918 if (inst->opcode->iclass == asisdlse
1919 || inst->opcode->iclass == asisdlsep
1920 || inst->opcode->iclass == asisdlso
1921 || inst->opcode->iclass == asisdlsop)
1922 fld_sz = FLD_vldst_size;
1923 else
1924 fld_sz = FLD_size;
1925
1926 code = inst->value;
1927 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1928   /* Obtain the info about which bits of fields Q and size are actually
1929      available for operand encoding.  Opcodes like FMAXNM and FMLA have
1930      size[1] unavailable.  */
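        /* E.g. for FMLA (vector), only size<0> and Q are free, so the mask
           computed below is 0x3 rather than 0x7.  */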
1931 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1932
1933   /* The index of the operand we are going to tag a qualifier on, and the
1934      qualifier itself, are deduced from the value of the size and Q fields and the
1935 possible valid qualifier lists. */
1936 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1937 DEBUG_TRACE ("key idx: %d", idx);
1938
1939   /* For most related instructions, size:Q is fully available for operand
1940      encoding.  */
1941 if (mask == 0x7)
1942 {
1943 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1944 return 1;
1945 }
1946
1947 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1948 candidates);
1949 #ifdef DEBUG_AARCH64
1950 if (debug_dump)
1951 {
1952 int i;
1953 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1954 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1955 DEBUG_TRACE ("qualifier %d: %s", i,
1956 aarch64_get_qualifier_name(candidates[i]));
1957 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1958 }
1959 #endif /* DEBUG_AARCH64 */
1960
1961 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1962
1963 if (qualifier == AARCH64_OPND_QLF_NIL)
1964 return 0;
1965
1966 inst->operands[idx].qualifier = qualifier;
1967 return 1;
1968 }
1969
1970 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1971 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1972
1973 static int
1974 decode_asimd_fcvt (aarch64_inst *inst)
1975 {
1976 aarch64_field field = {0, 0};
1977 aarch64_insn value;
1978 enum aarch64_opnd_qualifier qualifier;
1979
1980 gen_sub_field (FLD_size, 0, 1, &field);
1981 value = extract_field_2 (&field, inst->value, 0);
1982 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1983 : AARCH64_OPND_QLF_V_2D;
1984 switch (inst->opcode->op)
1985 {
1986 case OP_FCVTN:
1987 case OP_FCVTN2:
1988 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1989 inst->operands[1].qualifier = qualifier;
1990 break;
1991 case OP_FCVTL:
1992 case OP_FCVTL2:
1993 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1994 inst->operands[0].qualifier = qualifier;
1995 break;
1996 default:
1997 assert (0);
1998 return 0;
1999 }
2000
2001 return 1;
2002 }
2003
2004 /* Decode size[0], i.e. bit 22, for
2005 e.g. FCVTXN <Vb><d>, <Va><n>. */
2006
2007 static int
2008 decode_asisd_fcvtxn (aarch64_inst *inst)
2009 {
2010 aarch64_field field = {0, 0};
2011 gen_sub_field (FLD_size, 0, 1, &field);
2012 if (!extract_field_2 (&field, inst->value, 0))
2013 return 0;
2014 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2015 return 1;
2016 }
2017
2018 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
2019 static int
2020 decode_fcvt (aarch64_inst *inst)
2021 {
2022 enum aarch64_opnd_qualifier qualifier;
2023 aarch64_insn value;
2024 const aarch64_field field = {15, 2};
2025
2026 /* opc dstsize */
2027 value = extract_field_2 (&field, inst->value, 0);
2028 switch (value)
2029 {
2030 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2031 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2032 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2033 default: return 0;
2034 }
2035 inst->operands[0].qualifier = qualifier;
2036
2037 return 1;
2038 }
2039
2040 /* Do miscellaneous decodings that are not common enough to be driven by
2041 flags. */
2042
2043 static int
2044 do_misc_decoding (aarch64_inst *inst)
2045 {
2046 unsigned int value;
2047 switch (inst->opcode->op)
2048 {
2049 case OP_FCVT:
2050 return decode_fcvt (inst);
2051
2052 case OP_FCVTN:
2053 case OP_FCVTN2:
2054 case OP_FCVTL:
2055 case OP_FCVTL2:
2056 return decode_asimd_fcvt (inst);
2057
2058 case OP_FCVTXN_S:
2059 return decode_asisd_fcvtxn (inst);
2060
2061 case OP_MOV_P_P:
2062 case OP_MOVS_P_P:
2063 value = extract_field (FLD_SVE_Pn, inst->value, 0);
2064 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
2065 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2066
2067 case OP_MOV_Z_P_Z:
2068 return (extract_field (FLD_SVE_Zd, inst->value, 0)
2069 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2070
2071 case OP_MOV_Z_V:
2072 /* Index must be zero. */
2073 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2074 return value > 0 && value <= 16 && value == (value & -value);
2075
2076 case OP_MOV_Z_Z:
2077 return (extract_field (FLD_SVE_Zn, inst->value, 0)
2078 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2079
2080 case OP_MOV_Z_Zi:
2081 /* Index must be nonzero. */
2082 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2083 return value > 0 && value != (value & -value);
2084
2085 case OP_MOVM_P_P_P:
2086 return (extract_field (FLD_SVE_Pd, inst->value, 0)
2087 == extract_field (FLD_SVE_Pm, inst->value, 0));
2088
2089 case OP_MOVZS_P_P_P:
2090 case OP_MOVZ_P_P_P:
2091 return (extract_field (FLD_SVE_Pn, inst->value, 0)
2092 == extract_field (FLD_SVE_Pm, inst->value, 0));
2093
2094 case OP_NOTS_P_P_P_Z:
2095 case OP_NOT_P_P_P_Z:
2096 return (extract_field (FLD_SVE_Pm, inst->value, 0)
2097 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2098
2099 default:
2100 return 0;
2101 }
2102 }
2103
2104 /* Opcodes that have fields shared by multiple operands are usually flagged
2105    with special flags.  In this function, we detect such flags, decode the
2106    related field(s) and store the information in one of the related operands.
2107    The 'one' operand is not just any operand, but one of the operands that can
2108    accommodate all the information that has been decoded.  */
2109
2110 static int
2111 do_special_decoding (aarch64_inst *inst)
2112 {
2113 int idx;
2114 aarch64_insn value;
2115   /* Condition for truly conditionally executed instructions, e.g. b.cond.  */
2116 if (inst->opcode->flags & F_COND)
2117 {
2118 value = extract_field (FLD_cond2, inst->value, 0);
2119 inst->cond = get_cond_from_value (value);
2120 }
2121 /* 'sf' field. */
2122 if (inst->opcode->flags & F_SF)
2123 {
2124 idx = select_operand_for_sf_field_coding (inst->opcode);
2125 value = extract_field (FLD_sf, inst->value, 0);
2126 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2127 if ((inst->opcode->flags & F_N)
2128 && extract_field (FLD_N, inst->value, 0) != value)
2129 return 0;
2130 }
2131   /* 'lse_sz' field.  */
2132 if (inst->opcode->flags & F_LSE_SZ)
2133 {
2134 idx = select_operand_for_sf_field_coding (inst->opcode);
2135 value = extract_field (FLD_lse_sz, inst->value, 0);
2136 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2137 }
2138 /* size:Q fields. */
2139 if (inst->opcode->flags & F_SIZEQ)
2140 return decode_sizeq (inst);
2141
2142 if (inst->opcode->flags & F_FPTYPE)
2143 {
2144 idx = select_operand_for_fptype_field_coding (inst->opcode);
2145 value = extract_field (FLD_type, inst->value, 0);
2146 switch (value)
2147 {
2148 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2149 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2150 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2151 default: return 0;
2152 }
2153 }
2154
2155 if (inst->opcode->flags & F_SSIZE)
2156 {
2157 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
2158 of the base opcode. */
2159 aarch64_insn mask;
2160 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2161 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2162 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2163 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2164       /* For most related instructions, the 'size' field is fully available for
2165          operand encoding.  */
2166 if (mask == 0x3)
2167 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2168 else
2169 {
2170 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2171 candidates);
2172 inst->operands[idx].qualifier
2173 = get_qualifier_from_partial_encoding (value, candidates, mask);
2174 }
2175 }
2176
2177 if (inst->opcode->flags & F_T)
2178 {
2179 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2180 int num = 0;
2181 unsigned val, Q;
2182 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2183 == AARCH64_OPND_CLASS_SIMD_REG);
2184 /* imm5<3:0> q <t>
2185 0000 x reserved
2186 xxx1 0 8b
2187 xxx1 1 16b
2188 xx10 0 4h
2189 xx10 1 8h
2190 x100 0 2s
2191 x100 1 4s
2192 1000 0 reserved
2193 1000 1 2d */
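            /* E.g. imm5 == x100 gives num == 2, so with Q == 1 the qualifier is
               get_vreg_qualifier_from_value (5), i.e. 4S, matching the table
               above.  */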
2194 val = extract_field (FLD_imm5, inst->value, 0);
2195 while ((val & 0x1) == 0 && ++num <= 3)
2196 val >>= 1;
2197 if (num > 3)
2198 return 0;
2199 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2200 inst->operands[0].qualifier =
2201 get_vreg_qualifier_from_value ((num << 1) | Q);
2202 }
2203
2204 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2205 {
2206 /* Use Rt to encode in the case of e.g.
2207 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2208 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2209 if (idx == -1)
2210 {
2211           /* Otherwise use the result operand, which has to be an integer
2212              register.  */
2213 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2214 == AARCH64_OPND_CLASS_INT_REG);
2215 idx = 0;
2216 }
2217 assert (idx == 0 || idx == 1);
2218 value = extract_field (FLD_Q, inst->value, 0);
2219 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2220 }
2221
2222 if (inst->opcode->flags & F_LDS_SIZE)
2223 {
2224 aarch64_field field = {0, 0};
2225 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2226 == AARCH64_OPND_CLASS_INT_REG);
2227 gen_sub_field (FLD_opc, 0, 1, &field);
2228 value = extract_field_2 (&field, inst->value, 0);
2229 inst->operands[0].qualifier
2230 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2231 }
2232
2233 /* Miscellaneous decoding; done as the last step. */
2234 if (inst->opcode->flags & F_MISC)
2235 return do_misc_decoding (inst);
2236
2237 return 1;
2238 }
2239
2240 /* Converters converting a real opcode instruction to its alias form. */
2241
2242 /* ROR <Wd>, <Ws>, #<shift>
2243 is equivalent to:
2244 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2245 static int
2246 convert_extr_to_ror (aarch64_inst *inst)
2247 {
2248 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2249 {
2250 copy_operand_info (inst, 2, 3);
2251 inst->operands[3].type = AARCH64_OPND_NIL;
2252 return 1;
2253 }
2254 return 0;
2255 }
2256
2257 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2258 is equivalent to:
2259 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2260 static int
2261 convert_shll_to_xtl (aarch64_inst *inst)
2262 {
2263 if (inst->operands[2].imm.value == 0)
2264 {
2265 inst->operands[2].type = AARCH64_OPND_NIL;
2266 return 1;
2267 }
2268 return 0;
2269 }
2270
2271 /* Convert
2272 UBFM <Xd>, <Xn>, #<shift>, #63.
2273 to
2274 LSR <Xd>, <Xn>, #<shift>. */
2275 static int
2276 convert_bfm_to_sr (aarch64_inst *inst)
2277 {
2278 int64_t imms, val;
2279
2280 imms = inst->operands[3].imm.value;
2281 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2282 if (imms == val)
2283 {
2284 inst->operands[3].type = AARCH64_OPND_NIL;
2285 return 1;
2286 }
2287
2288 return 0;
2289 }
2290
2291 /* Convert MOV to ORR. */
2292 static int
2293 convert_orr_to_mov (aarch64_inst *inst)
2294 {
2295 /* MOV <Vd>.<T>, <Vn>.<T>
2296 is equivalent to:
2297 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2298 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2299 {
2300 inst->operands[2].type = AARCH64_OPND_NIL;
2301 return 1;
2302 }
2303 return 0;
2304 }
2305
2306 /* When <imms> >= <immr>, the instruction written:
2307 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2308 is equivalent to:
2309 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
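      /* E.g. SBFM X0, X1, #4, #11 has <imms> (11) >= <immr> (4) and is
         therefore printed as SBFX X0, X1, #4, #8.  */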
2310
2311 static int
2312 convert_bfm_to_bfx (aarch64_inst *inst)
2313 {
2314 int64_t immr, imms;
2315
2316 immr = inst->operands[2].imm.value;
2317 imms = inst->operands[3].imm.value;
2318 if (imms >= immr)
2319 {
2320 int64_t lsb = immr;
2321 inst->operands[2].imm.value = lsb;
2322 inst->operands[3].imm.value = imms + 1 - lsb;
2323 /* The two opcodes have different qualifiers for
2324 the immediate operands; reset to help the checking. */
2325 reset_operand_qualifier (inst, 2);
2326 reset_operand_qualifier (inst, 3);
2327 return 1;
2328 }
2329
2330 return 0;
2331 }
2332
2333 /* When <imms> < <immr>, the instruction written:
2334 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2335 is equivalent to:
2336 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
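      /* E.g. SBFM X0, X1, #60, #3 has <imms> (3) < <immr> (60) and is
         therefore printed as SBFIZ X0, X1, #4, #4.  */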
2337
2338 static int
2339 convert_bfm_to_bfi (aarch64_inst *inst)
2340 {
2341 int64_t immr, imms, val;
2342
2343 immr = inst->operands[2].imm.value;
2344 imms = inst->operands[3].imm.value;
2345 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2346 if (imms < immr)
2347 {
2348 inst->operands[2].imm.value = (val - immr) & (val - 1);
2349 inst->operands[3].imm.value = imms + 1;
2350 /* The two opcodes have different qualifiers for
2351 the immediate operands; reset to help the checking. */
2352 reset_operand_qualifier (inst, 2);
2353 reset_operand_qualifier (inst, 3);
2354 return 1;
2355 }
2356
2357 return 0;
2358 }
2359
2360 /* The instruction written:
2361 BFC <Xd>, #<lsb>, #<width>
2362 is equivalent to:
2363 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2364
2365 static int
2366 convert_bfm_to_bfc (aarch64_inst *inst)
2367 {
2368 int64_t immr, imms, val;
2369
2370 /* Should have been assured by the base opcode value. */
2371 assert (inst->operands[1].reg.regno == 0x1f);
2372
2373 immr = inst->operands[2].imm.value;
2374 imms = inst->operands[3].imm.value;
2375 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2376 if (imms < immr)
2377 {
2378 /* Drop XZR from the second operand. */
2379 copy_operand_info (inst, 1, 2);
2380 copy_operand_info (inst, 2, 3);
2381 inst->operands[3].type = AARCH64_OPND_NIL;
2382
2383 /* Recalculate the immediates. */
2384 inst->operands[1].imm.value = (val - immr) & (val - 1);
2385 inst->operands[2].imm.value = imms + 1;
2386
2387 /* The two opcodes have different qualifiers for the operands; reset to
2388 help the checking. */
2389 reset_operand_qualifier (inst, 1);
2390 reset_operand_qualifier (inst, 2);
2391 reset_operand_qualifier (inst, 3);
2392
2393 return 1;
2394 }
2395
2396 return 0;
2397 }
2398
2399 /* The instruction written:
2400 LSL <Xd>, <Xn>, #<shift>
2401 is equivalent to:
2402 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2403
2404 static int
2405 convert_ubfm_to_lsl (aarch64_inst *inst)
2406 {
2407 int64_t immr = inst->operands[2].imm.value;
2408 int64_t imms = inst->operands[3].imm.value;
2409 int64_t val
2410 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2411
2412 if ((immr == 0 && imms == val) || immr == imms + 1)
2413 {
2414 inst->operands[3].type = AARCH64_OPND_NIL;
2415 inst->operands[2].imm.value = val - imms;
2416 return 1;
2417 }
2418
2419 return 0;
2420 }
2421
2422 /* CINC <Wd>, <Wn>, <cond>
2423 is equivalent to:
2424 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2425 where <cond> is not AL or NV. */
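      /* E.g. CSINC W0, W1, W1, NE is printed as CINC W0, W1, EQ (the inverted
         condition).  */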
2426
2427 static int
2428 convert_from_csel (aarch64_inst *inst)
2429 {
2430 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2431 && (inst->operands[3].cond->value & 0xe) != 0xe)
2432 {
2433 copy_operand_info (inst, 2, 3);
2434 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2435 inst->operands[3].type = AARCH64_OPND_NIL;
2436 return 1;
2437 }
2438 return 0;
2439 }
2440
2441 /* CSET <Wd>, <cond>
2442 is equivalent to:
2443 CSINC <Wd>, WZR, WZR, invert(<cond>)
2444 where <cond> is not AL or NV. */
2445
2446 static int
2447 convert_csinc_to_cset (aarch64_inst *inst)
2448 {
2449 if (inst->operands[1].reg.regno == 0x1f
2450 && inst->operands[2].reg.regno == 0x1f
2451 && (inst->operands[3].cond->value & 0xe) != 0xe)
2452 {
2453 copy_operand_info (inst, 1, 3);
2454 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2455 inst->operands[3].type = AARCH64_OPND_NIL;
2456 inst->operands[2].type = AARCH64_OPND_NIL;
2457 return 1;
2458 }
2459 return 0;
2460 }
2461
2462 /* MOV <Wd>, #<imm>
2463 is equivalent to:
2464 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2465
2466 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2467 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2468 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2469 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2470 machine-instruction mnemonic must be used. */
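      /* E.g. MOVZ W0, #0x12, LSL #16 is printed as MOV W0, #0x120000, while
         MOVZ W0, #0, LSL #16 keeps the MOVZ mnemonic because the immediate is
         zero with a nonzero shift.  */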
2471
2472 static int
2473 convert_movewide_to_mov (aarch64_inst *inst)
2474 {
2475 uint64_t value = inst->operands[1].imm.value;
2476 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2477 if (value == 0 && inst->operands[1].shifter.amount != 0)
2478 return 0;
2479 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2480 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2481 value <<= inst->operands[1].shifter.amount;
2482   /* As an alias converter, it should be clear that INST->OPCODE
2483      is the opcode of the real instruction.  */
2484 if (inst->opcode->op == OP_MOVN)
2485 {
2486 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2487 value = ~value;
2488 /* A MOVN has an immediate that could be encoded by MOVZ. */
2489 if (aarch64_wide_constant_p (value, is32, NULL))
2490 return 0;
2491 }
2492 inst->operands[1].imm.value = value;
2493 inst->operands[1].shifter.amount = 0;
2494 return 1;
2495 }
2496
2497 /* MOV <Wd>, #<imm>
2498 is equivalent to:
2499 ORR <Wd>, WZR, #<imm>.
2500
2501 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2502 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2503 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2504 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2505 machine-instruction mnemonic must be used. */
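      /* E.g. ORR W0, WZR, #0xf0f0f0f0 is printed as MOV W0, #0xf0f0f0f0, but
         ORR W0, WZR, #0xffff is kept as ORR because that immediate could also
         be encoded by MOVZ.  */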
2506
2507 static int
2508 convert_movebitmask_to_mov (aarch64_inst *inst)
2509 {
2510 int is32;
2511 uint64_t value;
2512
2513 /* Should have been assured by the base opcode value. */
2514 assert (inst->operands[1].reg.regno == 0x1f);
2515 copy_operand_info (inst, 1, 2);
2516 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2517 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2518 value = inst->operands[1].imm.value;
2519 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2520 instruction. */
2521 if (inst->operands[0].reg.regno != 0x1f
2522 && (aarch64_wide_constant_p (value, is32, NULL)
2523 || aarch64_wide_constant_p (~value, is32, NULL)))
2524 return 0;
2525
2526 inst->operands[2].type = AARCH64_OPND_NIL;
2527 return 1;
2528 }
2529
2530 /* Some alias opcodes are disassembled by being converted from their real-form.
2531 N.B. INST->OPCODE is the real opcode rather than the alias. */
2532
2533 static int
2534 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2535 {
2536 switch (alias->op)
2537 {
2538 case OP_ASR_IMM:
2539 case OP_LSR_IMM:
2540 return convert_bfm_to_sr (inst);
2541 case OP_LSL_IMM:
2542 return convert_ubfm_to_lsl (inst);
2543 case OP_CINC:
2544 case OP_CINV:
2545 case OP_CNEG:
2546 return convert_from_csel (inst);
2547 case OP_CSET:
2548 case OP_CSETM:
2549 return convert_csinc_to_cset (inst);
2550 case OP_UBFX:
2551 case OP_BFXIL:
2552 case OP_SBFX:
2553 return convert_bfm_to_bfx (inst);
2554 case OP_SBFIZ:
2555 case OP_BFI:
2556 case OP_UBFIZ:
2557 return convert_bfm_to_bfi (inst);
2558 case OP_BFC:
2559 return convert_bfm_to_bfc (inst);
2560 case OP_MOV_V:
2561 return convert_orr_to_mov (inst);
2562 case OP_MOV_IMM_WIDE:
2563 case OP_MOV_IMM_WIDEN:
2564 return convert_movewide_to_mov (inst);
2565 case OP_MOV_IMM_LOG:
2566 return convert_movebitmask_to_mov (inst);
2567 case OP_ROR_IMM:
2568 return convert_extr_to_ror (inst);
2569 case OP_SXTL:
2570 case OP_SXTL2:
2571 case OP_UXTL:
2572 case OP_UXTL2:
2573 return convert_shll_to_xtl (inst);
2574 default:
2575 return 0;
2576 }
2577 }
2578
2579 static bfd_boolean
2580 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2581 aarch64_inst *, int, aarch64_operand_error *errors);
2582
2583 /* Given the instruction information in *INST, check if the instruction has
2584 any alias form that can be used to represent *INST. If the answer is yes,
2585 update *INST to be in the form of the determined alias. */
2586
2587 /* In the opcode description table, the following flags are used in opcode
2588 entries to help establish the relations between the real and alias opcodes:
2589
2590 F_ALIAS: opcode is an alias
2591 F_HAS_ALIAS: opcode has alias(es)
2592 F_P1
2593 F_P2
2594      F_P3: Disassembly preference priority 1-3 (the larger the number, the
2595            higher the priority).  If nothing is specified, the priority
2596            defaults to 0, i.e. the lowest priority.
2597
2598    Although the relation between the machine and the alias instructions is not
2599    explicitly described, it can easily be determined from the base opcode
2600 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2601 description entries:
2602
2603 The mask of an alias opcode must be equal to or a super-set (i.e. more
2604 constrained) of that of the aliased opcode; so is the base opcode value.
2605
2606 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2607 && (opcode->mask & real->mask) == real->mask
2608 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2609 then OPCODE is an alias of, and only of, the REAL instruction
2610
2611    The alias relationship is forced to be flat-structured to keep the related
2612    algorithm simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2613
2614 During the disassembling, the decoding decision tree (in
2615    opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2616    if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2617    not specified), the disassembler will check whether any alias instruction
2618    exists for this real instruction.  If there is, the disassembler will try to
2619    disassemble the 32-bit binary again using the alias's rule, or try to
2620    convert the IR to the form of the alias.  In the case of multiple aliases,
2621    the aliases are tried one by one from the highest priority (currently the
2622    flag F_P3) to the lowest priority (no priority flag), and the first one
2623    that succeeds is adopted.
2624
2625 You may ask why there is a need for the conversion of IR from one form to
2626 another in handling certain aliases. This is because on one hand it avoids
2627    adding more operand code to handle unusual encoding/decoding; on the other
2628    hand, during the disassembling, the conversion is an effective approach to
2629 check the condition of an alias (as an alias may be adopted only if certain
2630 conditions are met).
2631
2632 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2633 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2634 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
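      /* For example, UBFM is the real opcode and LSL (immediate) is one of its
         conversion (F_CONV) aliases; an encoding is printed as LSL only if
         convert_ubfm_to_lsl above accepts it.  */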
2635
2636 static void
2637 determine_disassembling_preference (struct aarch64_inst *inst,
2638 aarch64_operand_error *errors)
2639 {
2640 const aarch64_opcode *opcode;
2641 const aarch64_opcode *alias;
2642
2643 opcode = inst->opcode;
2644
2645 /* This opcode does not have an alias, so use itself. */
2646 if (!opcode_has_alias (opcode))
2647 return;
2648
2649 alias = aarch64_find_alias_opcode (opcode);
2650 assert (alias);
2651
2652 #ifdef DEBUG_AARCH64
2653 if (debug_dump)
2654 {
2655 const aarch64_opcode *tmp = alias;
2656 printf ("#### LIST orderd: ");
2657 while (tmp)
2658 {
2659 printf ("%s, ", tmp->name);
2660 tmp = aarch64_find_next_alias_opcode (tmp);
2661 }
2662 printf ("\n");
2663 }
2664 #endif /* DEBUG_AARCH64 */
2665
2666 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2667 {
2668 DEBUG_TRACE ("try %s", alias->name);
2669 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2670
2671 /* An alias can be a pseudo opcode which will never be used in the
2672 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2673 aliasing AND. */
2674 if (pseudo_opcode_p (alias))
2675 {
2676 DEBUG_TRACE ("skip pseudo %s", alias->name);
2677 continue;
2678 }
2679
2680 if ((inst->value & alias->mask) != alias->opcode)
2681 {
2682 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2683 continue;
2684 }
2685 /* No need to do any complicated transformation on operands, if the alias
2686 opcode does not have any operand. */
2687 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2688 {
2689 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2690 aarch64_replace_opcode (inst, alias);
2691 return;
2692 }
2693 if (alias->flags & F_CONV)
2694 {
2695 aarch64_inst copy;
2696 memcpy (&copy, inst, sizeof (aarch64_inst));
2697 /* ALIAS is the preference as long as the instruction can be
2698 successfully converted to the form of ALIAS. */
2699 if (convert_to_alias (&copy, alias) == 1)
2700 {
2701 aarch64_replace_opcode (&copy, alias);
2702 assert (aarch64_match_operands_constraint (&copy, NULL));
2703 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2704 memcpy (inst, &copy, sizeof (aarch64_inst));
2705 return;
2706 }
2707 }
2708 else
2709 {
2710 /* Directly decode the alias opcode. */
2711 aarch64_inst temp;
2712 memset (&temp, '\0', sizeof (aarch64_inst));
2713 if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
2714 {
2715 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2716 memcpy (inst, &temp, sizeof (aarch64_inst));
2717 return;
2718 }
2719 }
2720 }
2721 }
2722
2723 /* Some instructions (including all SVE ones) use the instruction class
2724 to describe how a qualifiers_list index is represented in the instruction
2725 encoding. If INST is such an instruction, decode the appropriate fields
2726 and fill in the operand qualifiers accordingly. Return true if no
2727 problems are found. */
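      /* E.g. for the sve_shift_pred class below, tszh:tszl == 0b0100 has its
         set bit in position 2, which selects variant 2 (the third entry in
         the qualifiers list).  */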
2728
2729 static bfd_boolean
2730 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2731 {
2732 int i, variant;
2733
2734 variant = 0;
2735 switch (inst->opcode->iclass)
2736 {
2737 case sve_cpy:
2738 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2739 break;
2740
2741 case sve_index:
2742 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2743 if ((i & 31) == 0)
2744 return FALSE;
2745 while ((i & 1) == 0)
2746 {
2747 i >>= 1;
2748 variant += 1;
2749 }
2750 break;
2751
2752 case sve_limm:
2753 /* Pick the smallest applicable element size. */
2754 if ((inst->value & 0x20600) == 0x600)
2755 variant = 0;
2756 else if ((inst->value & 0x20400) == 0x400)
2757 variant = 1;
2758 else if ((inst->value & 0x20000) == 0)
2759 variant = 2;
2760 else
2761 variant = 3;
2762 break;
2763
2764 case sve_misc:
2765 /* sve_misc instructions have only a single variant. */
2766 break;
2767
2768 case sve_movprfx:
2769 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2770 break;
2771
2772 case sve_pred_zm:
2773 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2774 break;
2775
2776 case sve_shift_pred:
2777 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2778 sve_shift:
2779 if (i == 0)
2780 return FALSE;
2781 while (i != 1)
2782 {
2783 i >>= 1;
2784 variant += 1;
2785 }
2786 break;
2787
2788 case sve_shift_unpred:
2789 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2790 goto sve_shift;
2791
2792 case sve_size_bhs:
2793 variant = extract_field (FLD_size, inst->value, 0);
2794 if (variant >= 3)
2795 return FALSE;
2796 break;
2797
2798 case sve_size_bhsd:
2799 variant = extract_field (FLD_size, inst->value, 0);
2800 break;
2801
2802 case sve_size_hsd:
2803 i = extract_field (FLD_size, inst->value, 0);
2804 if (i < 1)
2805 return FALSE;
2806 variant = i - 1;
2807 break;
2808
2809 case sve_size_bh:
2810 case sve_size_sd:
2811 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2812 break;
2813
2814 case sve_size_sd2:
2815 variant = extract_field (FLD_SVE_sz2, inst->value, 0);
2816 break;
2817
2818 case sve_size_hsd2:
2819 i = extract_field (FLD_SVE_size, inst->value, 0);
2820 if (i < 1)
2821 return FALSE;
2822 variant = i - 1;
2823 break;
2824
2825 case sve_size_013:
2826 i = extract_field (FLD_size, inst->value, 0);
2827 if (i == 2)
2828 return FALSE;
2829 if (i == 3)
2830 variant = 2;
2831 else
2832 variant = i;
2833 break;
2834
2835 case sve_shift_tsz_bhsd:
2836 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2837 if (i == 0)
2838 return FALSE;
2839 while (i != 1)
2840 {
2841 i >>= 1;
2842 variant += 1;
2843 }
2844 break;
2845
2846 case sve_size_tsz_bhs:
2847 i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
2848 while (i != 1)
2849 {
2850 if (i & 1)
2851 return FALSE;
2852 i >>= 1;
2853 variant += 1;
2854 }
2855 break;
2856
2857 case sve_shift_tsz_hsd:
2858 i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
2859 if (i == 0)
2860 return FALSE;
2861 while (i != 1)
2862 {
2863 i >>= 1;
2864 variant += 1;
2865 }
2866 break;
2867
2868 default:
2869 /* No mapping between instruction class and qualifiers. */
2870 return TRUE;
2871 }
2872
2873 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2874 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2875 return TRUE;
2876 }
2877 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2878    fails, which means that CODE is not an instruction of OPCODE; otherwise
2879 return 1.
2880
2881 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2882 determined and used to disassemble CODE; this is done just before the
2883 return. */
2884
2885 static bfd_boolean
2886 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2887 aarch64_inst *inst, int noaliases_p,
2888 aarch64_operand_error *errors)
2889 {
2890 int i;
2891
2892 DEBUG_TRACE ("enter with %s", opcode->name);
2893
2894 assert (opcode && inst);
2895
2896 /* Clear inst. */
2897 memset (inst, '\0', sizeof (aarch64_inst));
2898
2899 /* Check the base opcode. */
2900 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2901 {
2902 DEBUG_TRACE ("base opcode match FAIL");
2903 goto decode_fail;
2904 }
2905
2906 inst->opcode = opcode;
2907 inst->value = code;
2908
2909 /* Assign operand codes and indexes. */
2910 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2911 {
2912 if (opcode->operands[i] == AARCH64_OPND_NIL)
2913 break;
2914 inst->operands[i].type = opcode->operands[i];
2915 inst->operands[i].idx = i;
2916 }
2917
2918 /* Call the opcode decoder indicated by flags. */
2919 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2920 {
2921 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2922 goto decode_fail;
2923 }
2924
2925 /* Possibly use the instruction class to determine the correct
2926 qualifier. */
2927 if (!aarch64_decode_variant_using_iclass (inst))
2928 {
2929 DEBUG_TRACE ("iclass-based decoder FAIL");
2930 goto decode_fail;
2931 }
2932
2933 /* Call operand decoders. */
2934 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2935 {
2936 const aarch64_operand *opnd;
2937 enum aarch64_opnd type;
2938
2939 type = opcode->operands[i];
2940 if (type == AARCH64_OPND_NIL)
2941 break;
2942 opnd = &aarch64_operands[type];
2943 if (operand_has_extractor (opnd)
2944 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
2945 errors)))
2946 {
2947 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2948 goto decode_fail;
2949 }
2950 }
2951
2952 /* If the opcode has a verifier, then check it now. */
2953 if (opcode->verifier
2954 && opcode->verifier (inst, code, 0, FALSE, errors, NULL) != ERR_OK)
2955 {
2956 DEBUG_TRACE ("operand verifier FAIL");
2957 goto decode_fail;
2958 }
2959
2960 /* Match the qualifiers. */
2961 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2962 {
2963 /* Arriving here, the CODE has been determined as a valid instruction
2964 of OPCODE and *INST has been filled with information of this OPCODE
2965 instruction. Before the return, check if the instruction has any
2966 alias and should be disassembled in the form of its alias instead.
2967 If the answer is yes, *INST will be updated. */
2968 if (!noaliases_p)
2969 determine_disassembling_preference (inst, errors);
2970 DEBUG_TRACE ("SUCCESS");
2971 return TRUE;
2972 }
2973 else
2974 {
2975 DEBUG_TRACE ("constraint matching FAIL");
2976 }
2977
2978 decode_fail:
2979 return FALSE;
2980 }
2981 \f
2982 /* This does some user-friendly fix-up to *INST.  It currently focuses on
2983    adjusting qualifiers to help the printed instruction be recognized and
2984    understood more easily.  */
2985
2986 static void
2987 user_friendly_fixup (aarch64_inst *inst)
2988 {
2989 switch (inst->opcode->iclass)
2990 {
2991 case testbranch:
2992 /* TBNZ Xn|Wn, #uimm6, label
2993 Test and Branch Not Zero: conditionally jumps to label if bit number
2994 uimm6 in register Xn is not zero. The bit number implies the width of
2995 the register, which may be written and should be disassembled as Wn if
2996      uimm6 is less than 32.  Limited to a branch offset range of +/- 32KiB.
2997 */
2998 if (inst->operands[1].imm.value < 32)
2999 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
3000 break;
3001 default: break;
3002 }
3003 }
3004
3005 /* Decode INSN and fill in *INST with the instruction information.  An alias
3006    opcode may be filled in *INST if NOALIASES_P is FALSE.  Return ERR_OK on
3007    success.  */
3008
3009 enum err_type
3010 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
3011 bfd_boolean noaliases_p,
3012 aarch64_operand_error *errors)
3013 {
3014 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
3015
3016 #ifdef DEBUG_AARCH64
3017 if (debug_dump)
3018 {
3019 const aarch64_opcode *tmp = opcode;
3020 printf ("\n");
3021 DEBUG_TRACE ("opcode lookup:");
3022 while (tmp != NULL)
3023 {
3024 aarch64_verbose (" %s", tmp->name);
3025 tmp = aarch64_find_next_opcode (tmp);
3026 }
3027 }
3028 #endif /* DEBUG_AARCH64 */
3029
3030 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
3031 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
3032 opcode field and value, apart from the difference that one of them has an
3033 extra field as part of the opcode, but such a field is used for operand
3034 encoding in other opcode(s) ('immh' in the case of the example). */
3035 while (opcode != NULL)
3036 {
3037       /* But only one opcode can be decoded successfully, as the
3038          decoding routine will check the constraints carefully.  */
3039 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
3040 return ERR_OK;
3041 opcode = aarch64_find_next_opcode (opcode);
3042 }
3043
3044 return ERR_UND;
3045 }
3046
3047 /* Print operands. */
3048
3049 static void
3050 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
3051 const aarch64_opnd_info *opnds, struct disassemble_info *info,
3052 bfd_boolean *has_notes)
3053 {
3054 char *notes = NULL;
3055 int i, pcrel_p, num_printed;
3056 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3057 {
3058 char str[128];
3059       /* We rely primarily on the opcode operand info; however, we also look
3060          into inst->operands to support the disassembling of the optional
3061          operand.
3062          The two operand codes should be the same in all cases, apart from
3063          when the operand can be optional.  */
3064 if (opcode->operands[i] == AARCH64_OPND_NIL
3065 || opnds[i].type == AARCH64_OPND_NIL)
3066 break;
3067
3068 /* Generate the operand string in STR. */
3069 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
3070 &info->target, &notes);
3071
3072 /* Print the delimiter (taking account of omitted operand(s)). */
3073 if (str[0] != '\0')
3074 (*info->fprintf_func) (info->stream, "%s",
3075 num_printed++ == 0 ? "\t" : ", ");
3076
3077 /* Print the operand. */
3078 if (pcrel_p)
3079 (*info->print_address_func) (info->target, info);
3080 else
3081 (*info->fprintf_func) (info->stream, "%s", str);
3082 }
3083
3084 if (notes && !no_notes)
3085 {
3086 *has_notes = TRUE;
3087 (*info->fprintf_func) (info->stream, " // note: %s", notes);
3088 }
3089 }
3090
3091 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
3092
3093 static void
3094 remove_dot_suffix (char *name, const aarch64_inst *inst)
3095 {
3096 char *ptr;
3097 size_t len;
3098
3099 ptr = strchr (inst->opcode->name, '.');
3100 assert (ptr && inst->cond);
3101 len = ptr - inst->opcode->name;
3102 assert (len < 8);
3103 strncpy (name, inst->opcode->name, len);
3104 name[len] = '\0';
3105 }
3106
3107 /* Print the instruction mnemonic name. */
3108
3109 static void
3110 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3111 {
3112 if (inst->opcode->flags & F_COND)
3113 {
3114 /* For instructions that are truly conditionally executed, e.g. b.cond,
3115 prepare the full mnemonic name with the corresponding condition
3116 suffix. */
3117 char name[8];
3118
3119 remove_dot_suffix (name, inst);
3120 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
3121 }
3122 else
3123 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
3124 }
3125
3126 /* Decide whether we need to print a comment after the operands of
3127 instruction INST. */
3128
3129 static void
3130 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
3131 {
3132 if (inst->opcode->flags & F_COND)
3133 {
3134 char name[8];
3135 unsigned int i, num_conds;
3136
3137 remove_dot_suffix (name, inst);
3138 num_conds = ARRAY_SIZE (inst->cond->names);
3139 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
3140 (*info->fprintf_func) (info->stream, "%s %s.%s",
3141 i == 1 ? " //" : ",",
3142 name, inst->cond->names[i]);
3143 }
3144 }
3145
3146 /* Build notes from verifiers into a string for printing. */
3147
3148 static void
3149 print_verifier_notes (aarch64_operand_error *detail,
3150 struct disassemble_info *info)
3151 {
3152 if (no_notes)
3153 return;
3154
3155 /* The output of the verifier cannot be a fatal error, otherwise the assembly
3156 would not have succeeded. We can safely ignore these. */
3157 assert (detail->non_fatal);
3158 assert (detail->error);
3159
3160 /* If there are multiple verifier messages, concat them up to 1k. */
3161 (*info->fprintf_func) (info->stream, " // note: %s", detail->error);
3162 if (detail->index >= 0)
3163 (*info->fprintf_func) (info->stream, " at operand %d", detail->index + 1);
3164 }
3165
3166 /* Print the instruction according to *INST. */
3167
3168 static void
3169 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
3170 const aarch64_insn code,
3171 struct disassemble_info *info,
3172 aarch64_operand_error *mismatch_details)
3173 {
3174 bfd_boolean has_notes = FALSE;
3175
3176 print_mnemonic_name (inst, info);
3177 print_operands (pc, inst->opcode, inst->operands, info, &has_notes);
3178 print_comment (inst, info);
3179
3180   /* If we've already printed a note, there is not enough space to print more,
3181      so exit.  Usually notes shouldn't overlap, so it shouldn't happen that we
3182      have a note from both a register and an instruction at the same time.  */
3183 if (has_notes)
3184 return;
3185
3186   /* Always run constraint verifiers; this is needed because constraints need to
3187 maintain a global state regardless of whether the instruction has the flag
3188 set or not. */
3189 enum err_type result = verify_constraints (inst, code, pc, FALSE,
3190 mismatch_details, &insn_sequence);
3191 switch (result)
3192 {
3193 case ERR_UND:
3194 case ERR_UNP:
3195 case ERR_NYI:
3196 assert (0);
3197 case ERR_VFI:
3198 print_verifier_notes (mismatch_details, info);
3199 break;
3200 default:
3201 break;
3202 }
3203 }
3204
3205 /* Entry-point of the instruction disassembler and printer. */
3206
3207 static void
3208 print_insn_aarch64_word (bfd_vma pc,
3209 uint32_t word,
3210 struct disassemble_info *info,
3211 aarch64_operand_error *errors)
3212 {
3213 static const char *err_msg[ERR_NR_ENTRIES+1] =
3214 {
3215 [ERR_OK] = "_",
3216 [ERR_UND] = "undefined",
3217 [ERR_UNP] = "unpredictable",
3218 [ERR_NYI] = "NYI"
3219 };
3220
3221 enum err_type ret;
3222 aarch64_inst inst;
3223
3224 info->insn_info_valid = 1;
3225 info->branch_delay_insns = 0;
3226 info->data_size = 0;
3227 info->target = 0;
3228 info->target2 = 0;
3229
3230 if (info->flags & INSN_HAS_RELOC)
3231 /* If the instruction has a reloc associated with it, then
3232 the offset field in the instruction will actually be the
3233 addend for the reloc. (If we are using REL type relocs).
3234 In such cases, we can ignore the pc when computing
3235 addresses, since the addend is not currently pc-relative. */
3236 pc = 0;
3237
3238 ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
3239
3240 if (((word >> 21) & 0x3ff) == 1)
3241 {
3242 /* RESERVED for ALES. */
3243 assert (ret != ERR_OK);
3244 ret = ERR_NYI;
3245 }
3246
3247 switch (ret)
3248 {
3249 case ERR_UND:
3250 case ERR_UNP:
3251 case ERR_NYI:
3252 /* Handle undefined instructions. */
3253 info->insn_type = dis_noninsn;
3254 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
3255 word, err_msg[ret]);
3256 break;
3257 case ERR_OK:
3258 user_friendly_fixup (&inst);
3259 print_aarch64_insn (pc, &inst, word, info, errors);
3260 break;
3261 default:
3262 abort ();
3263 }
3264 }
3265
3266 /* Disallow mapping symbols ($x, $d etc) from
3267 being displayed in symbol relative addresses. */
3268
3269 bfd_boolean
3270 aarch64_symbol_is_valid (asymbol * sym,
3271 struct disassemble_info * info ATTRIBUTE_UNUSED)
3272 {
3273 const char * name;
3274
3275 if (sym == NULL)
3276 return FALSE;
3277
3278 name = bfd_asymbol_name (sym);
3279
3280 return name
3281 && (name[0] != '$'
3282 || (name[1] != 'x' && name[1] != 'd')
3283 || (name[2] != '\0' && name[2] != '.'));
3284 }
3285
3286 /* Print data bytes on INFO->STREAM. */
3287
3288 static void
3289 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3290 uint32_t word,
3291 struct disassemble_info *info,
3292 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
3293 {
3294 switch (info->bytes_per_chunk)
3295 {
3296 case 1:
3297 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3298 break;
3299 case 2:
3300 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3301 break;
3302 case 4:
3303 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3304 break;
3305 default:
3306 abort ();
3307 }
3308 }
3309
3310 /* Try to infer the code or data type from a symbol.
3311 Returns nonzero if *MAP_TYPE was set. */
3312
3313 static int
3314 get_sym_code_type (struct disassemble_info *info, int n,
3315 enum map_type *map_type)
3316 {
3317 elf_symbol_type *es;
3318 unsigned int type;
3319 const char *name;
3320
3321 /* If the symbol is in a different section, ignore it. */
3322 if (info->section != NULL && info->section != info->symtab[n]->section)
3323 return FALSE;
3324
3325 es = *(elf_symbol_type **)(info->symtab + n);
3326 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3327
3328 /* If the symbol has function type then use that. */
3329 if (type == STT_FUNC)
3330 {
3331 *map_type = MAP_INSN;
3332 return TRUE;
3333 }
3334
3335 /* Check for mapping symbols. */
3336 name = bfd_asymbol_name(info->symtab[n]);
3337 if (name[0] == '$'
3338 && (name[1] == 'x' || name[1] == 'd')
3339 && (name[2] == '\0' || name[2] == '.'))
3340 {
3341 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3342 return TRUE;
3343 }
3344
3345 return FALSE;
3346 }
3347
3348 /* Entry-point of the AArch64 disassembler. */
3349
3350 int
3351 print_insn_aarch64 (bfd_vma pc,
3352 struct disassemble_info *info)
3353 {
3354 bfd_byte buffer[INSNLEN];
3355 int status;
3356 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *,
3357 aarch64_operand_error *);
3358 bfd_boolean found = FALSE;
3359 unsigned int size = 4;
3360 unsigned long data;
3361 aarch64_operand_error errors;
3362
3363 if (info->disassembler_options)
3364 {
3365 set_default_aarch64_dis_options (info);
3366
3367 parse_aarch64_dis_options (info->disassembler_options);
3368
3369 /* To avoid repeated parsing of these options, we remove them here. */
3370 info->disassembler_options = NULL;
3371 }
3372
3373   /* AArch64 instructions are always little-endian.  */
3374 info->endian_code = BFD_ENDIAN_LITTLE;
3375
3376 /* Default to DATA. A text section is required by the ABI to contain an
3377 INSN mapping symbol at the start. A data section has no such
3378 requirement, hence if no mapping symbol is found the section must
3379 contain only data. This however isn't very useful if the user has
3380 fully stripped the binaries. If this is the case use the section
3381 attributes to determine the default. If we have no section default to
3382      INSN as well, as we may be disassembling some raw bytes in a bare-metal
3383      HEX file or similar.  */
3384 enum map_type type = MAP_DATA;
3385 if ((info->section && info->section->flags & SEC_CODE) || !info->section)
3386 type = MAP_INSN;
3387
3388 /* First check the full symtab for a mapping symbol, even if there
3389 are no usable non-mapping symbols for this address. */
3390 if (info->symtab_size != 0
3391 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3392 {
3393 int last_sym = -1;
3394 bfd_vma addr, section_vma = 0;
3395 bfd_boolean can_use_search_opt_p;
3396 int n;
3397
3398 if (pc <= last_mapping_addr)
3399 last_mapping_sym = -1;
3400
3401 /* Start scanning at the start of the function, or wherever
3402 we finished last time. */
3403 n = info->symtab_pos + 1;
3404
3405 /* If the last stop offset is different from the current one it means we
3406 are disassembling a different glob of bytes. As such the optimization
3407 would not be safe and we should start over. */
3408 can_use_search_opt_p = last_mapping_sym >= 0
3409 && info->stop_offset == last_stop_offset;
3410
3411 if (n >= last_mapping_sym && can_use_search_opt_p)
3412 n = last_mapping_sym;
3413
3414 /* Look down while we haven't passed the location being disassembled.
3415 The reason for this is that there's no defined order between a symbol
3416          and a mapping symbol that may be at the same address.  We may have to
3417 look at least one position ahead. */
3418 for (; n < info->symtab_size; n++)
3419 {
3420 addr = bfd_asymbol_value (info->symtab[n]);
3421 if (addr > pc)
3422 break;
3423 if (get_sym_code_type (info, n, &type))
3424 {
3425 last_sym = n;
3426 found = TRUE;
3427 }
3428 }
3429
3430 if (!found)
3431 {
3432 n = info->symtab_pos;
3433 if (n >= last_mapping_sym && can_use_search_opt_p)
3434 n = last_mapping_sym;
3435
3436 /* No mapping symbol found at this address. Look backwards
3437              for a preceding one, but don't go past the section start,
3438              otherwise a data section with no mapping symbol can pick up
3439              a text mapping symbol of a preceding section.  The documentation
3440              says section can be NULL, in which case we will search all the
3441              way up to the top.  */
3442 if (info->section)
3443 section_vma = info->section->vma;
3444
3445 for (; n >= 0; n--)
3446 {
3447 addr = bfd_asymbol_value (info->symtab[n]);
3448 if (addr < section_vma)
3449 break;
3450
3451 if (get_sym_code_type (info, n, &type))
3452 {
3453 last_sym = n;
3454 found = TRUE;
3455 break;
3456 }
3457 }
3458 }
3459
3460 last_mapping_sym = last_sym;
3461 last_type = type;
3462 last_stop_offset = info->stop_offset;
3463
3464 /* Look a little bit ahead to see if we should print out
3465 less than four bytes of data. If there's a symbol,
3466 mapping or otherwise, after two bytes then don't
3467 print more. */
3468 if (last_type == MAP_DATA)
3469 {
3470 size = 4 - (pc & 3);
3471 for (n = last_sym + 1; n < info->symtab_size; n++)
3472 {
3473 addr = bfd_asymbol_value (info->symtab[n]);
3474 if (addr > pc)
3475 {
3476 if (addr - pc < size)
3477 size = addr - pc;
3478 break;
3479 }
3480 }
3481 /* If the next symbol is after three bytes, we need to
3482 print only part of the data, so that we can use either
3483 .byte or .short. */
3484 if (size == 3)
3485 size = (pc & 1) ? 1 : 2;
3486 }
3487 }
3488 else
3489 last_type = type;
3490
3491 /* PR 10263: Disassemble data if requested to do so by the user. */
3492 if (last_type == MAP_DATA && ((info->flags & DISASSEMBLE_DATA) == 0))
3493 {
3494 /* size was set above. */
3495 info->bytes_per_chunk = size;
3496 info->display_endian = info->endian;
3497 printer = print_insn_data;
3498 }
3499 else
3500 {
3501 info->bytes_per_chunk = size = INSNLEN;
3502 info->display_endian = info->endian_code;
3503 printer = print_insn_aarch64_word;
3504 }
3505
3506 status = (*info->read_memory_func) (pc, buffer, size, info);
3507 if (status != 0)
3508 {
3509 (*info->memory_error_func) (status, pc, info);
3510 return -1;
3511 }
3512
3513 data = bfd_get_bits (buffer, size * 8,
3514 info->display_endian == BFD_ENDIAN_BIG);
3515
3516 (*printer) (pc, data, info, &errors);
3517
3518 return size;
3519 }
3520 \f
3521 void
3522 print_aarch64_disassembler_options (FILE *stream)
3523 {
3524 fprintf (stream, _("\n\
3525 The following AARCH64 specific disassembler options are supported for use\n\
3526 with the -M switch (multiple options should be separated by commas):\n"));
3527
3528 fprintf (stream, _("\n\
3529 no-aliases Don't print instruction aliases.\n"));
3530
3531 fprintf (stream, _("\n\
3532 aliases Do print instruction aliases.\n"));
3533
3534 fprintf (stream, _("\n\
3535 no-notes Don't print instruction notes.\n"));
3536
3537 fprintf (stream, _("\n\
3538 notes Do print instruction notes.\n"));
3539
3540 #ifdef DEBUG_AARCH64
3541 fprintf (stream, _("\n\
3542 debug_dump Temp switch for debug trace.\n"));
3543 #endif /* DEBUG_AARCH64 */
3544
3545 fprintf (stream, _("\n"));
3546 }