1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define ERR_OK 0
30 #define ERR_UND -1
31 #define ERR_UNP -3
32 #define ERR_NYI -5
33
34 #define INSNLEN 4
35
36 /* Cached mapping symbol state. */
37 enum map_type
38 {
39 MAP_INSN,
40 MAP_DATA
41 };
42
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
46
47 /* Other options */
48 static int no_aliases = 0; /* If set, disassemble as the most general instruction. */
49 \f
50
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
61 {
62 no_aliases = 1;
63 return;
64 }
65
66 if (CONST_STRNEQ (option, "aliases"))
67 {
68 no_aliases = 0;
69 return;
70 }
71
72 #ifdef DEBUG_AARCH64
73 if (CONST_STRNEQ (option, "debug_dump"))
74 {
75 debug_dump = 1;
76 return;
77 }
78 #endif /* DEBUG_AARCH64 */
79
80 /* Invalid option. */
81 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
82 }
83
84 static void
85 parse_aarch64_dis_options (const char *options)
86 {
87 const char *option_end;
88
89 if (options == NULL)
90 return;
91
92 while (*options != '\0')
93 {
94 /* Skip empty options. */
95 if (*options == ',')
96 {
97 options++;
98 continue;
99 }
100
101 /* We know that *options is neither NUL nor a comma. */
102 option_end = options + 1;
103 while (*option_end != ',' && *option_end != '\0')
104 option_end++;
105
106 parse_aarch64_dis_option (options, option_end - options);
107
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
111 }
112 }
113 \f
114 /* Functions doing the instruction disassembling. */
115
116 /* The unnamed arguments consist of the number of fields, followed by the
117 fields themselves; the VALUE formed by those fields is extracted from CODE and returned.
118 MASK can be zero or the base mask of the opcode.
119
120 N.B. the fields are required to be in such an order that the most significant
121 field for VALUE comes first, e.g. the <index> in
122 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
124 the order of H, L, M. */
125
126 aarch64_insn
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
128 {
129 uint32_t num;
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
132 va_list va;
133
134 va_start (va, mask);
135 num = va_arg (va, uint32_t);
136 assert (num <= 5);
137 aarch64_insn value = 0x0;
138 while (num--)
139 {
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
144 }
145 return value;
146 }
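/* For illustration: a lane index encoded as H:L is retrieved with
   extract_fields (code, 0, 2, FLD_H, FLD_L); FLD_H is passed first because
   it supplies the more significant bit of the result.  */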
147
148 /* Extract the value of all fields in SELF->fields from instruction CODE.
149 The least significant bit comes from the final field. */
150
151 static aarch64_insn
152 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
153 {
154 aarch64_insn value;
155 unsigned int i;
156 enum aarch64_field_kind kind;
157
158 value = 0;
159 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
160 {
161 kind = self->fields[i];
162 value <<= fields[kind].width;
163 value |= extract_field (kind, code, 0);
164 }
165 return value;
166 }
167
168 /* Sign-extend bit I of VALUE. */
169 static inline int32_t
170 sign_extend (aarch64_insn value, unsigned i)
171 {
172 uint32_t ret = value;
173
174 assert (i < 32);
175 if ((value >> i) & 0x1)
176 {
177 uint32_t val = (uint32_t)(-1) << i;
178 ret = ret | val;
179 }
180 return (int32_t) ret;
181 }
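/* For illustration: sign_extend (0x1f0, 8) treats bit 8 as the sign bit and
   returns -16 (0xfffffff0), which is how negative offsets in fields such as
   imm9 are recovered.  */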
182
183 /* N.B. the following inline helper functions create a dependency on the
184 order of operand qualifier enumerators. */
185
186 /* Given VALUE, return qualifier for a general purpose register. */
187 static inline enum aarch64_opnd_qualifier
188 get_greg_qualifier_from_value (aarch64_insn value)
189 {
190 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
191 assert (value <= 0x1
192 && aarch64_get_qualifier_standard_value (qualifier) == value);
193 return qualifier;
194 }
195
196 /* Given VALUE, return qualifier for a vector register. This does not support
197 decoding instructions that accept the 2H vector type. */
198
199 static inline enum aarch64_opnd_qualifier
200 get_vreg_qualifier_from_value (aarch64_insn value)
201 {
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
203
204 /* Instructions using vector type 2H should not call this function. Skip over
205 the 2H qualifier. */
206 if (qualifier >= AARCH64_OPND_QLF_V_2H)
207 qualifier += 1;
208
209 assert (value <= 0x8
210 && aarch64_get_qualifier_standard_value (qualifier) == value);
211 return qualifier;
212 }
213
214 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
215 static inline enum aarch64_opnd_qualifier
216 get_sreg_qualifier_from_value (aarch64_insn value)
217 {
218 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
219
220 assert (value <= 0x4
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
222 return qualifier;
223 }
224
225 /* Given the instruction in *INST, which is probably halfway through its
226 decoding, our caller wants to know the expected qualifier for operand
227 I. Return such a qualifier if we can establish it; otherwise return
228 AARCH64_OPND_QLF_NIL. */
229
230 static aarch64_opnd_qualifier_t
231 get_expected_qualifier (const aarch64_inst *inst, int i)
232 {
233 aarch64_opnd_qualifier_seq_t qualifiers;
234 /* Should not be called if the qualifier is known. */
235 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
236 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
237 i, qualifiers))
238 return qualifiers[i];
239 else
240 return AARCH64_OPND_QLF_NIL;
241 }
242
243 /* Operand extractors. */
244
245 int
246 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
247 const aarch64_insn code,
248 const aarch64_inst *inst ATTRIBUTE_UNUSED)
249 {
250 info->reg.regno = extract_field (self->fields[0], code, 0);
251 return 1;
252 }
253
254 int
255 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
256 const aarch64_insn code ATTRIBUTE_UNUSED,
257 const aarch64_inst *inst ATTRIBUTE_UNUSED)
258 {
259 assert (info->idx == 1
260 || info->idx == 3);
261 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
262 return 1;
263 }
264
265 /* e.g. IC <ic_op>{, <Xt>}. */
266 int
267 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
268 const aarch64_insn code,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED)
270 {
271 info->reg.regno = extract_field (self->fields[0], code, 0);
272 assert (info->idx == 1
273 && (aarch64_get_operand_class (inst->operands[0].type)
274 == AARCH64_OPND_CLASS_SYSTEM));
275 /* This will make the constraint checking happy and more importantly will
276 help the disassembler determine whether this operand is optional or
277 not. */
278 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
279
280 return 1;
281 }
282
283 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
284 int
285 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
286 const aarch64_insn code,
287 const aarch64_inst *inst ATTRIBUTE_UNUSED)
288 {
289 /* regno */
290 info->reglane.regno = extract_field (self->fields[0], code,
291 inst->opcode->mask);
292
293 /* Index and/or type. */
294 if (inst->opcode->iclass == asisdone
295 || inst->opcode->iclass == asimdins)
296 {
297 if (info->type == AARCH64_OPND_En
298 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
299 {
300 unsigned shift;
301 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
302 assert (info->idx == 1); /* Vn */
303 aarch64_insn value = extract_field (FLD_imm4, code, 0);
304 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
305 info->qualifier = get_expected_qualifier (inst, info->idx);
306 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
307 info->reglane.index = value >> shift;
308 }
309 else
310 {
311 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
312 imm5<3:0> <V>
313 0000 RESERVED
314 xxx1 B
315 xx10 H
316 x100 S
317 1000 D */
318 int pos = -1;
319 aarch64_insn value = extract_field (FLD_imm5, code, 0);
320 while (++pos <= 3 && (value & 0x1) == 0)
321 value >>= 1;
322 if (pos > 3)
323 return 0;
324 info->qualifier = get_sreg_qualifier_from_value (pos);
325 info->reglane.index = (unsigned) (value >> 1);
326 }
327 }
328 else if (inst->opcode->iclass == dotproduct)
329 {
330 /* Need information in other operand(s) to help decoding. */
331 info->qualifier = get_expected_qualifier (inst, info->idx);
332 switch (info->qualifier)
333 {
334 case AARCH64_OPND_QLF_S_B:
335 /* L:H */
336 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
337 info->reglane.regno &= 0x1f;
338 break;
339 default:
340 return 0;
341 }
342 }
343 else
344 {
345 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
346 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
347
348 /* Need information in other operand(s) to help decoding. */
349 info->qualifier = get_expected_qualifier (inst, info->idx);
350 switch (info->qualifier)
351 {
352 case AARCH64_OPND_QLF_S_H:
353 /* h:l:m */
354 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
355 FLD_M);
356 info->reglane.regno &= 0xf;
357 break;
358 case AARCH64_OPND_QLF_S_S:
359 /* h:l */
360 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
361 break;
362 case AARCH64_OPND_QLF_S_D:
363 /* H */
364 info->reglane.index = extract_field (FLD_H, code, 0);
365 break;
366 default:
367 return 0;
368 }
369
370 if (inst->opcode->op == OP_FCMLA_ELEM)
371 {
372 /* Complex operand takes two elements. */
373 if (info->reglane.index & 1)
374 return 0;
375 info->reglane.index /= 2;
376 }
377 }
378
379 return 1;
380 }
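/* For illustration: for an S_H element above, H=1, L=0, M=1 encodes lane
   index 0b101 == 5, and the 4-bit register mask restricts the element
   register to V0-V15.  */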
381
382 int
383 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
384 const aarch64_insn code,
385 const aarch64_inst *inst ATTRIBUTE_UNUSED)
386 {
387 /* R */
388 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
389 /* len */
390 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
391 return 1;
392 }
393
394 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
395 int
396 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
397 aarch64_opnd_info *info, const aarch64_insn code,
398 const aarch64_inst *inst)
399 {
400 aarch64_insn value;
401 /* Number of elements in each structure to be loaded/stored. */
402 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
403
404 struct
405 {
406 unsigned is_reserved;
407 unsigned num_regs;
408 unsigned num_elements;
409 } data [] =
410 { {0, 4, 4},
411 {1, 4, 4},
412 {0, 4, 1},
413 {0, 4, 2},
414 {0, 3, 3},
415 {1, 3, 3},
416 {0, 3, 1},
417 {0, 1, 1},
418 {0, 2, 2},
419 {1, 2, 2},
420 {0, 2, 1},
421 };
422
423 /* Rt */
424 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
425 /* opcode */
426 value = extract_field (FLD_opcode, code, 0);
427 /* PR 21595: Check for a bogus value. */
428 if (value >= ARRAY_SIZE (data))
429 return 0;
430 if (expected_num != data[value].num_elements || data[value].is_reserved)
431 return 0;
432 info->reglist.num_regs = data[value].num_regs;
433
434 return 1;
435 }
436
437 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
438 lanes instructions. */
439 int
440 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
441 aarch64_opnd_info *info, const aarch64_insn code,
442 const aarch64_inst *inst)
443 {
444 aarch64_insn value;
445
446 /* Rt */
447 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
448 /* S */
449 value = extract_field (FLD_S, code, 0);
450
451 /* Number of registers is equal to the number of elements in
452 each structure to be loaded/stored. */
453 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
454 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
455
456 /* Except when it is LD1R. */
457 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
458 info->reglist.num_regs = 2;
459
460 return 1;
461 }
462
463 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
464 load/store single element instructions. */
465 int
466 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
467 aarch64_opnd_info *info, const aarch64_insn code,
468 const aarch64_inst *inst ATTRIBUTE_UNUSED)
469 {
470 aarch64_field field = {0, 0};
471 aarch64_insn QSsize; /* fields Q:S:size. */
472 aarch64_insn opcodeh2; /* opcode<2:1> */
473
474 /* Rt */
475 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
476
477 /* Decode the index, opcode<2:1> and size. */
478 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
479 opcodeh2 = extract_field_2 (&field, code, 0);
480 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
481 switch (opcodeh2)
482 {
483 case 0x0:
484 info->qualifier = AARCH64_OPND_QLF_S_B;
485 /* Index encoded in "Q:S:size". */
486 info->reglist.index = QSsize;
487 break;
488 case 0x1:
489 if (QSsize & 0x1)
490 /* UND. */
491 return 0;
492 info->qualifier = AARCH64_OPND_QLF_S_H;
493 /* Index encoded in "Q:S:size<1>". */
494 info->reglist.index = QSsize >> 1;
495 break;
496 case 0x2:
497 if ((QSsize >> 1) & 0x1)
498 /* UND. */
499 return 0;
500 if ((QSsize & 0x1) == 0)
501 {
502 info->qualifier = AARCH64_OPND_QLF_S_S;
503 /* Index encoded in "Q:S". */
504 info->reglist.index = QSsize >> 2;
505 }
506 else
507 {
508 if (extract_field (FLD_S, code, 0))
509 /* UND */
510 return 0;
511 info->qualifier = AARCH64_OPND_QLF_S_D;
512 /* Index encoded in "Q". */
513 info->reglist.index = QSsize >> 3;
514 }
515 break;
516 default:
517 return 0;
518 }
519
520 info->reglist.has_index = 1;
521 info->reglist.num_regs = 0;
522 /* Number of registers is equal to the number of elements in
523 each structure to be loaded/stored. */
524 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
525 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
526
527 return 1;
528 }
529
530 /* Decode fields immh:immb and/or Q for e.g.
531 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
532 or SSHR <V><d>, <V><n>, #<shift>. */
533
534 int
535 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
536 aarch64_opnd_info *info, const aarch64_insn code,
537 const aarch64_inst *inst)
538 {
539 int pos;
540 aarch64_insn Q, imm, immh;
541 enum aarch64_insn_class iclass = inst->opcode->iclass;
542
543 immh = extract_field (FLD_immh, code, 0);
544 if (immh == 0)
545 return 0;
546 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
547 pos = 4;
548 /* Get highest set bit in immh. */
549 while (--pos >= 0 && (immh & 0x8) == 0)
550 immh <<= 1;
551
552 assert ((iclass == asimdshf || iclass == asisdshf)
553 && (info->type == AARCH64_OPND_IMM_VLSR
554 || info->type == AARCH64_OPND_IMM_VLSL));
555
556 if (iclass == asimdshf)
557 {
558 Q = extract_field (FLD_Q, code, 0);
559 /* immh Q <T>
560 0000 x SEE AdvSIMD modified immediate
561 0001 0 8B
562 0001 1 16B
563 001x 0 4H
564 001x 1 8H
565 01xx 0 2S
566 01xx 1 4S
567 1xxx 0 RESERVED
568 1xxx 1 2D */
569 info->qualifier =
570 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
571 }
572 else
573 info->qualifier = get_sreg_qualifier_from_value (pos);
574
575 if (info->type == AARCH64_OPND_IMM_VLSR)
576 /* immh <shift>
577 0000 SEE AdvSIMD modified immediate
578 0001 (16-UInt(immh:immb))
579 001x (32-UInt(immh:immb))
580 01xx (64-UInt(immh:immb))
581 1xxx (128-UInt(immh:immb)) */
582 info->imm.value = (16 << pos) - imm;
583 else
584 /* immh:immb
585 immh <shift>
586 0000 SEE AdvSIMD modified immediate
587 0001 (UInt(immh:immb)-8)
588 001x (UInt(immh:immb)-16)
589 01xx (UInt(immh:immb)-32)
590 1xxx (UInt(immh:immb)-64) */
591 info->imm.value = imm - (8 << pos);
592
593 return 1;
594 }
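/* For illustration: for SSHR with immh:immb == 0b0001001, the highest set
   bit of immh gives pos == 0 (byte elements) and the right-shift amount is
   (16 << 0) - 9 == 7.  */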
595
596 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
597 int
598 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
599 aarch64_opnd_info *info, const aarch64_insn code,
600 const aarch64_inst *inst ATTRIBUTE_UNUSED)
601 {
602 int64_t imm;
603 aarch64_insn val;
604 val = extract_field (FLD_size, code, 0);
605 switch (val)
606 {
607 case 0: imm = 8; break;
608 case 1: imm = 16; break;
609 case 2: imm = 32; break;
610 default: return 0;
611 }
612 info->imm.value = imm;
613 return 1;
614 }
615
616 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
617 The value in the field(s) is extracted as an unsigned immediate value. */
618 int
619 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
620 const aarch64_insn code,
621 const aarch64_inst *inst ATTRIBUTE_UNUSED)
622 {
623 int64_t imm;
624
625 imm = extract_all_fields (self, code);
626
627 if (operand_need_sign_extension (self))
628 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
629
630 if (operand_need_shift_by_two (self))
631 imm <<= 2;
632
633 if (info->type == AARCH64_OPND_ADDR_ADRP)
634 imm <<= 12;
635
636 info->imm.value = imm;
637 return 1;
638 }
639
640 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
641 int
642 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
643 const aarch64_insn code,
644 const aarch64_inst *inst ATTRIBUTE_UNUSED)
645 {
646 aarch64_ext_imm (self, info, code, inst);
647 info->shifter.kind = AARCH64_MOD_LSL;
648 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
649 return 1;
650 }
651
652 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
653 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
654 int
655 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
656 aarch64_opnd_info *info,
657 const aarch64_insn code,
658 const aarch64_inst *inst ATTRIBUTE_UNUSED)
659 {
660 uint64_t imm;
661 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
662 aarch64_field field = {0, 0};
663
664 assert (info->idx == 1);
665
666 if (info->type == AARCH64_OPND_SIMD_FPIMM)
667 info->imm.is_fp = 1;
668
669 /* a:b:c:d:e:f:g:h */
670 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
671 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
672 {
673 /* Either MOVI <Dd>, #<imm>
674 or MOVI <Vd>.2D, #<imm>.
675 <imm> is a 64-bit immediate
676 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
677 encoded in "a:b:c:d:e:f:g:h". */
678 int i;
679 unsigned abcdefgh = imm;
680 for (imm = 0ull, i = 0; i < 8; i++)
681 if (((abcdefgh >> i) & 0x1) != 0)
682 imm |= 0xffull << (8 * i);
683 }
684 info->imm.value = imm;
685
686 /* cmode */
687 info->qualifier = get_expected_qualifier (inst, info->idx);
688 switch (info->qualifier)
689 {
690 case AARCH64_OPND_QLF_NIL:
691 /* no shift */
692 info->shifter.kind = AARCH64_MOD_NONE;
693 return 1;
694 case AARCH64_OPND_QLF_LSL:
695 /* shift zeros */
696 info->shifter.kind = AARCH64_MOD_LSL;
697 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
698 {
699 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
700 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
701 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
702 default: assert (0); return 0;
703 }
704 /* 00: 0; 01: 8; 10:16; 11:24. */
705 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
706 break;
707 case AARCH64_OPND_QLF_MSL:
708 /* shift ones */
709 info->shifter.kind = AARCH64_MOD_MSL;
710 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
711 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
712 break;
713 default:
714 assert (0);
715 return 0;
716 }
717
718 return 1;
719 }
720
721 /* Decode an 8-bit floating-point immediate. */
722 int
723 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
724 const aarch64_insn code,
725 const aarch64_inst *inst ATTRIBUTE_UNUSED)
726 {
727 info->imm.value = extract_all_fields (self, code);
728 info->imm.is_fp = 1;
729 return 1;
730 }
731
732 /* Decode a 1-bit rotate immediate (#90 or #270). */
733 int
734 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
735 const aarch64_insn code,
736 const aarch64_inst *inst ATTRIBUTE_UNUSED)
737 {
738 uint64_t rot = extract_field (self->fields[0], code, 0);
739 assert (rot < 2U);
740 info->imm.value = rot * 180 + 90;
741 return 1;
742 }
743
744 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
745 int
746 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
747 const aarch64_insn code,
748 const aarch64_inst *inst ATTRIBUTE_UNUSED)
749 {
750 uint64_t rot = extract_field (self->fields[0], code, 0);
751 assert (rot < 4U);
752 info->imm.value = rot * 90;
753 return 1;
754 }
755
756 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
757 int
758 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
759 aarch64_opnd_info *info, const aarch64_insn code,
760 const aarch64_inst *inst ATTRIBUTE_UNUSED)
761 {
762 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
763 return 1;
764 }
765
766 /* Decode arithmetic immediate for e.g.
767 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
768 int
769 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
770 aarch64_opnd_info *info, const aarch64_insn code,
771 const aarch64_inst *inst ATTRIBUTE_UNUSED)
772 {
773 aarch64_insn value;
774
775 info->shifter.kind = AARCH64_MOD_LSL;
776 /* shift */
777 value = extract_field (FLD_shift, code, 0);
778 if (value >= 2)
779 return 0;
780 info->shifter.amount = value ? 12 : 0;
781 /* imm12 (unsigned) */
782 info->imm.value = extract_field (FLD_imm12, code, 0);
783
784 return 1;
785 }
786
787 /* Return true if VALUE is a valid logical immediate encoding, storing the
788 decoded value in *RESULT if so. ESIZE is the number of bytes in the
789 decoded immediate. */
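/* For illustration: with ESIZE == 4, the encoding N=0, immr=0, imms=0b000001
   selects a 32-bit element of two consecutive ones, so *RESULT is 0x3; this
   corresponds to e.g. ORR <Wd|WSP>, <Wn>, #3.  */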
790 static int
791 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
792 {
793 uint64_t imm, mask;
794 uint32_t N, R, S;
795 unsigned simd_size;
796
797 /* value is N:immr:imms. */
798 S = value & 0x3f;
799 R = (value >> 6) & 0x3f;
800 N = (value >> 12) & 0x1;
801
802 /* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
803 (in other words, right rotated by R), then replicated. */
804 if (N != 0)
805 {
806 simd_size = 64;
807 mask = 0xffffffffffffffffull;
808 }
809 else
810 {
811 switch (S)
812 {
813 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
814 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
815 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
816 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
817 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
818 default: return 0;
819 }
820 mask = (1ull << simd_size) - 1;
821 /* Top bits are IGNORED. */
822 R &= simd_size - 1;
823 }
824
825 if (simd_size > esize * 8)
826 return 0;
827
828 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
829 if (S == simd_size - 1)
830 return 0;
831 /* S+1 consecutive bits to 1. */
832 /* NOTE: S can't be 63 due to detection above. */
833 imm = (1ull << (S + 1)) - 1;
834 /* Rotate to the left by simd_size - R. */
835 if (R != 0)
836 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
837 /* Replicate the value according to SIMD size. */
838 switch (simd_size)
839 {
840 case 2: imm = (imm << 2) | imm;
841 /* Fall through. */
842 case 4: imm = (imm << 4) | imm;
843 /* Fall through. */
844 case 8: imm = (imm << 8) | imm;
845 /* Fall through. */
846 case 16: imm = (imm << 16) | imm;
847 /* Fall through. */
848 case 32: imm = (imm << 32) | imm;
849 /* Fall through. */
850 case 64: break;
851 default: assert (0); return 0;
852 }
853
854 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
855
856 return 1;
857 }
858
859 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
860 int
861 aarch64_ext_limm (const aarch64_operand *self,
862 aarch64_opnd_info *info, const aarch64_insn code,
863 const aarch64_inst *inst)
864 {
865 uint32_t esize;
866 aarch64_insn value;
867
868 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
869 self->fields[2]);
870 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
871 return decode_limm (esize, value, &info->imm.value);
872 }
873
874 /* Decode a logical immediate for the BIC alias of AND (etc.). */
875 int
876 aarch64_ext_inv_limm (const aarch64_operand *self,
877 aarch64_opnd_info *info, const aarch64_insn code,
878 const aarch64_inst *inst)
879 {
880 if (!aarch64_ext_limm (self, info, code, inst))
881 return 0;
882 info->imm.value = ~info->imm.value;
883 return 1;
884 }
885
886 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
887 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
888 int
889 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
890 aarch64_opnd_info *info,
891 const aarch64_insn code, const aarch64_inst *inst)
892 {
893 aarch64_insn value;
894
895 /* Rt */
896 info->reg.regno = extract_field (FLD_Rt, code, 0);
897
898 /* size */
899 value = extract_field (FLD_ldst_size, code, 0);
900 if (inst->opcode->iclass == ldstpair_indexed
901 || inst->opcode->iclass == ldstnapair_offs
902 || inst->opcode->iclass == ldstpair_off
903 || inst->opcode->iclass == loadlit)
904 {
905 enum aarch64_opnd_qualifier qualifier;
906 switch (value)
907 {
908 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
909 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
910 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
911 default: return 0;
912 }
913 info->qualifier = qualifier;
914 }
915 else
916 {
917 /* opc1:size */
918 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
919 if (value > 0x4)
920 return 0;
921 info->qualifier = get_sreg_qualifier_from_value (value);
922 }
923
924 return 1;
925 }
926
927 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
928 int
929 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
930 aarch64_opnd_info *info,
931 aarch64_insn code,
932 const aarch64_inst *inst ATTRIBUTE_UNUSED)
933 {
934 /* Rn */
935 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
936 return 1;
937 }
938
939 /* Decode the address operand for e.g.
940 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
941 int
942 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
943 aarch64_opnd_info *info,
944 aarch64_insn code, const aarch64_inst *inst)
945 {
946 aarch64_insn S, value;
947
948 /* Rn */
949 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
950 /* Rm */
951 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
952 /* option */
953 value = extract_field (FLD_option, code, 0);
954 info->shifter.kind =
955 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
956 /* Fix-up the shifter kind; although the table-driven approach is
957 efficient, it is slightly inflexible, thus needing this fix-up. */
958 if (info->shifter.kind == AARCH64_MOD_UXTX)
959 info->shifter.kind = AARCH64_MOD_LSL;
960 /* S */
961 S = extract_field (FLD_S, code, 0);
962 if (S == 0)
963 {
964 info->shifter.amount = 0;
965 info->shifter.amount_present = 0;
966 }
967 else
968 {
969 int size;
970 /* Need information in other operand(s) to help achieve the decoding
971 from 'S' field. */
972 info->qualifier = get_expected_qualifier (inst, info->idx);
973 /* Get the size of the data element that is accessed, which may be
974 different from that of the source register size, e.g. in strb/ldrb. */
975 size = aarch64_get_qualifier_esize (info->qualifier);
976 info->shifter.amount = get_logsz (size);
977 info->shifter.amount_present = 1;
978 }
979
980 return 1;
981 }
982
983 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
984 int
985 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
986 aarch64_insn code, const aarch64_inst *inst)
987 {
988 aarch64_insn imm;
989 info->qualifier = get_expected_qualifier (inst, info->idx);
990
991 /* Rn */
992 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
993 /* simm (imm9 or imm7) */
994 imm = extract_field (self->fields[0], code, 0);
995 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
996 if (self->fields[0] == FLD_imm7)
997 /* scaled immediate in ld/st pair instructions. */
998 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
999 /* qualifier */
1000 if (inst->opcode->iclass == ldst_unscaled
1001 || inst->opcode->iclass == ldstnapair_offs
1002 || inst->opcode->iclass == ldstpair_off
1003 || inst->opcode->iclass == ldst_unpriv)
1004 info->addr.writeback = 0;
1005 else
1006 {
1007 /* pre/post- index */
1008 info->addr.writeback = 1;
1009 if (extract_field (self->fields[1], code, 0) == 1)
1010 info->addr.preind = 1;
1011 else
1012 info->addr.postind = 1;
1013 }
1014
1015 return 1;
1016 }
1017
1018 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1019 int
1020 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1021 aarch64_insn code,
1022 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1023 {
1024 int shift;
1025 info->qualifier = get_expected_qualifier (inst, info->idx);
1026 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1027 /* Rn */
1028 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1029 /* uimm12 */
1030 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1031 return 1;
1032 }
1033
1034 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1035 int
1036 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1037 aarch64_insn code,
1038 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1039 {
1040 aarch64_insn imm;
1041
1042 info->qualifier = get_expected_qualifier (inst, info->idx);
1043 /* Rn */
1044 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1045 /* simm10 */
1046 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1047 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1048 if (extract_field (self->fields[3], code, 0) == 1) {
1049 info->addr.writeback = 1;
1050 info->addr.preind = 1;
1051 }
1052 return 1;
1053 }
1054
1055 /* Decode the address operand for e.g.
1056 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1057 int
1058 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1059 aarch64_opnd_info *info,
1060 aarch64_insn code, const aarch64_inst *inst)
1061 {
1062 /* The opcode dependent area stores the number of elements in
1063 each structure to be loaded/stored. */
1064 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1065
1066 /* Rn */
1067 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1068 /* Rm | #<amount> */
1069 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1070 if (info->addr.offset.regno == 31)
1071 {
1072 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1073 /* Special handling of loading single structure to all lanes. */
1074 info->addr.offset.imm = (is_ld1r ? 1
1075 : inst->operands[0].reglist.num_regs)
1076 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1077 else
1078 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1079 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1080 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1081 }
1082 else
1083 info->addr.offset.is_reg = 1;
1084 info->addr.writeback = 1;
1085
1086 return 1;
1087 }
1088
1089 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1090 int
1091 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1092 aarch64_opnd_info *info,
1093 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1094 {
1095 aarch64_insn value;
1096 /* cond */
1097 value = extract_field (FLD_cond, code, 0);
1098 info->cond = get_cond_from_value (value);
1099 return 1;
1100 }
1101
1102 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1103 int
1104 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1105 aarch64_opnd_info *info,
1106 aarch64_insn code,
1107 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1108 {
1109 /* op0:op1:CRn:CRm:op2 */
1110 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1111 FLD_CRm, FLD_op2);
1112 return 1;
1113 }
1114
1115 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1116 int
1117 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1118 aarch64_opnd_info *info, aarch64_insn code,
1119 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1120 {
1121 int i;
1122 /* op1:op2 */
1123 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1124 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1125 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1126 return 1;
1127 /* Reserved value in <pstatefield>. */
1128 return 0;
1129 }
1130
1131 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1132 int
1133 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1134 aarch64_opnd_info *info,
1135 aarch64_insn code,
1136 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1137 {
1138 int i;
1139 aarch64_insn value;
1140 const aarch64_sys_ins_reg *sysins_ops;
1141 /* op0:op1:CRn:CRm:op2 */
1142 value = extract_fields (code, 0, 5,
1143 FLD_op0, FLD_op1, FLD_CRn,
1144 FLD_CRm, FLD_op2);
1145
1146 switch (info->type)
1147 {
1148 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1149 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1150 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1151 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1152 default: assert (0); return 0;
1153 }
1154
1155 for (i = 0; sysins_ops[i].name != NULL; ++i)
1156 if (sysins_ops[i].value == value)
1157 {
1158 info->sysins_op = sysins_ops + i;
1159 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1160 info->sysins_op->name,
1161 (unsigned)info->sysins_op->value,
1162 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1163 return 1;
1164 }
1165
1166 return 0;
1167 }
1168
1169 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1170
1171 int
1172 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1173 aarch64_opnd_info *info,
1174 aarch64_insn code,
1175 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1176 {
1177 /* CRm */
1178 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1179 return 1;
1180 }
1181
1182 /* Decode the prefetch operation option operand for e.g.
1183 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1184
1185 int
1186 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1187 aarch64_opnd_info *info,
1188 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1189 {
1190 /* prfop in Rt */
1191 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1192 return 1;
1193 }
1194
1195 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1196 to the matching name/value pair in aarch64_hint_options. */
1197
1198 int
1199 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1200 aarch64_opnd_info *info,
1201 aarch64_insn code,
1202 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1203 {
1204 /* CRm:op2. */
1205 unsigned hint_number;
1206 int i;
1207
1208 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1209
1210 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1211 {
1212 if (hint_number == aarch64_hint_options[i].value)
1213 {
1214 info->hint_option = &(aarch64_hint_options[i]);
1215 return 1;
1216 }
1217 }
1218
1219 return 0;
1220 }
1221
1222 /* Decode the extended register operand for e.g.
1223 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1224 int
1225 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1226 aarch64_opnd_info *info,
1227 aarch64_insn code,
1228 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1229 {
1230 aarch64_insn value;
1231
1232 /* Rm */
1233 info->reg.regno = extract_field (FLD_Rm, code, 0);
1234 /* option */
1235 value = extract_field (FLD_option, code, 0);
1236 info->shifter.kind =
1237 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1238 /* imm3 */
1239 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1240
1241 /* This makes the constraint checking happy. */
1242 info->shifter.operator_present = 1;
1243
1244 /* Assume inst->operands[0].qualifier has been resolved. */
1245 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1246 info->qualifier = AARCH64_OPND_QLF_W;
1247 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1248 && (info->shifter.kind == AARCH64_MOD_UXTX
1249 || info->shifter.kind == AARCH64_MOD_SXTX))
1250 info->qualifier = AARCH64_OPND_QLF_X;
1251
1252 return 1;
1253 }
1254
1255 /* Decode the shifted register operand for e.g.
1256 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1257 int
1258 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1259 aarch64_opnd_info *info,
1260 aarch64_insn code,
1261 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1262 {
1263 aarch64_insn value;
1264
1265 /* Rm */
1266 info->reg.regno = extract_field (FLD_Rm, code, 0);
1267 /* shift */
1268 value = extract_field (FLD_shift, code, 0);
1269 info->shifter.kind =
1270 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1271 if (info->shifter.kind == AARCH64_MOD_ROR
1272 && inst->opcode->iclass != log_shift)
1273 /* ROR is not available for the shifted register operand in arithmetic
1274 instructions. */
1275 return 0;
1276 /* imm6 */
1277 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1278
1279 /* This makes the constraint checking happy. */
1280 info->shifter.operator_present = 1;
1281
1282 return 1;
1283 }
1284
1285 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1286 where <offset> is given by the OFFSET parameter and where <factor> is
1287 1 plus SELF's operand-dependent value. fields[0] specifies the field
1288 that holds <base>. */
1289 static int
1290 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1291 aarch64_opnd_info *info, aarch64_insn code,
1292 int64_t offset)
1293 {
1294 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1295 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1296 info->addr.offset.is_reg = FALSE;
1297 info->addr.writeback = FALSE;
1298 info->addr.preind = TRUE;
1299 if (offset != 0)
1300 info->shifter.kind = AARCH64_MOD_MUL_VL;
1301 info->shifter.amount = 1;
1302 info->shifter.operator_present = (info->addr.offset.imm != 0);
1303 info->shifter.amount_present = FALSE;
1304 return 1;
1305 }
1306
1307 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1308 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1309 SELF's operand-dependent value. fields[0] specifies the field that
1310 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1311 int
1312 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1313 aarch64_opnd_info *info, aarch64_insn code,
1314 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1315 {
1316 int offset;
1317
1318 offset = extract_field (FLD_SVE_imm4, code, 0);
1319 offset = ((offset + 8) & 15) - 8;
1320 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1321 }
1322
1323 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1324 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1325 SELF's operand-dependent value. fields[0] specifies the field that
1326 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1327 int
1328 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1329 aarch64_opnd_info *info, aarch64_insn code,
1330 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1331 {
1332 int offset;
1333
1334 offset = extract_field (FLD_SVE_imm6, code, 0);
1335 offset = (((offset + 32) & 63) - 32);
1336 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1337 }
1338
1339 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1340 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1341 SELF's operand-dependent value. fields[0] specifies the field that
1342 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1343 and imm3 fields, with imm3 being the less-significant part. */
1344 int
1345 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1346 aarch64_opnd_info *info,
1347 aarch64_insn code,
1348 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1349 {
1350 int offset;
1351
1352 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1353 offset = (((offset + 256) & 511) - 256);
1354 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1355 }
1356
1357 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1358 is given by the OFFSET parameter and where <shift> is SELF's operand-
1359 dependent value. fields[0] specifies the base register field <base>. */
1360 static int
1361 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1362 aarch64_opnd_info *info, aarch64_insn code,
1363 int64_t offset)
1364 {
1365 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1366 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1367 info->addr.offset.is_reg = FALSE;
1368 info->addr.writeback = FALSE;
1369 info->addr.preind = TRUE;
1370 info->shifter.operator_present = FALSE;
1371 info->shifter.amount_present = FALSE;
1372 return 1;
1373 }
1374
1375 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1376 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1377 value. fields[0] specifies the base register field. */
1378 int
1379 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1380 aarch64_opnd_info *info, aarch64_insn code,
1381 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1382 {
1383 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1384 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1385 }
1386
1387 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1388 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1389 value. fields[0] specifies the base register field. */
1390 int
1391 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1392 aarch64_opnd_info *info, aarch64_insn code,
1393 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1394 {
1395 int offset = extract_field (FLD_SVE_imm6, code, 0);
1396 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1397 }
1398
1399 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1400 is SELF's operand-dependent value. fields[0] specifies the base
1401 register field and fields[1] specifies the offset register field. */
1402 int
1403 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1404 aarch64_opnd_info *info, aarch64_insn code,
1405 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1406 {
1407 int index_regno;
1408
1409 index_regno = extract_field (self->fields[1], code, 0);
1410 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1411 return 0;
1412
1413 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1414 info->addr.offset.regno = index_regno;
1415 info->addr.offset.is_reg = TRUE;
1416 info->addr.writeback = FALSE;
1417 info->addr.preind = TRUE;
1418 info->shifter.kind = AARCH64_MOD_LSL;
1419 info->shifter.amount = get_operand_specific_data (self);
1420 info->shifter.operator_present = (info->shifter.amount != 0);
1421 info->shifter.amount_present = (info->shifter.amount != 0);
1422 return 1;
1423 }
1424
1425 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1426 <shift> is SELF's operand-dependent value. fields[0] specifies the
1427 base register field, fields[1] specifies the offset register field and
1428 fields[2] is a single-bit field that selects SXTW over UXTW. */
1429 int
1430 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1431 aarch64_opnd_info *info, aarch64_insn code,
1432 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1433 {
1434 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1435 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1436 info->addr.offset.is_reg = TRUE;
1437 info->addr.writeback = FALSE;
1438 info->addr.preind = TRUE;
1439 if (extract_field (self->fields[2], code, 0))
1440 info->shifter.kind = AARCH64_MOD_SXTW;
1441 else
1442 info->shifter.kind = AARCH64_MOD_UXTW;
1443 info->shifter.amount = get_operand_specific_data (self);
1444 info->shifter.operator_present = TRUE;
1445 info->shifter.amount_present = (info->shifter.amount != 0);
1446 return 1;
1447 }
1448
1449 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1450 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1451 fields[0] specifies the base register field. */
1452 int
1453 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1454 aarch64_opnd_info *info, aarch64_insn code,
1455 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1456 {
1457 int offset = extract_field (FLD_imm5, code, 0);
1458 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1459 }
1460
1461 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1462 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1463 number. fields[0] specifies the base register field and fields[1]
1464 specifies the offset register field. */
1465 static int
1466 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1467 aarch64_insn code, enum aarch64_modifier_kind kind)
1468 {
1469 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1470 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1471 info->addr.offset.is_reg = TRUE;
1472 info->addr.writeback = FALSE;
1473 info->addr.preind = TRUE;
1474 info->shifter.kind = kind;
1475 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1476 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1477 || info->shifter.amount != 0);
1478 info->shifter.amount_present = (info->shifter.amount != 0);
1479 return 1;
1480 }
1481
1482 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1483 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1484 field and fields[1] specifies the offset register field. */
1485 int
1486 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1487 aarch64_opnd_info *info, aarch64_insn code,
1488 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1489 {
1490 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1491 }
1492
1493 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1494 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1495 field and fields[1] specifies the offset register field. */
1496 int
1497 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1498 aarch64_opnd_info *info, aarch64_insn code,
1499 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1500 {
1501 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1502 }
1503
1504 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1505 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1506 field and fields[1] specifies the offset register field. */
1507 int
1508 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1509 aarch64_opnd_info *info, aarch64_insn code,
1510 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1511 {
1512 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1513 }
1514
1515 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1516 has the raw field value and that the low 8 bits decode to VALUE. */
1517 static int
1518 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1519 {
1520 info->shifter.kind = AARCH64_MOD_LSL;
1521 info->shifter.amount = 0;
1522 if (info->imm.value & 0x100)
1523 {
1524 if (value == 0)
1525 /* Decode 0x100 as #0, LSL #8. */
1526 info->shifter.amount = 8;
1527 else
1528 value *= 256;
1529 }
1530 info->shifter.operator_present = (info->shifter.amount != 0);
1531 info->shifter.amount_present = (info->shifter.amount != 0);
1532 info->imm.value = value;
1533 return 1;
1534 }
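/* For illustration: in the unsigned ADD/SUB case a raw value of 0x180
   decodes to 128 * 256 == #32768, whereas 0x100 is the special case decoded
   as #0, LSL #8 above.  */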
1535
1536 /* Decode an SVE ADD/SUB immediate. */
1537 int
1538 aarch64_ext_sve_aimm (const aarch64_operand *self,
1539 aarch64_opnd_info *info, const aarch64_insn code,
1540 const aarch64_inst *inst)
1541 {
1542 return (aarch64_ext_imm (self, info, code, inst)
1543 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1544 }
1545
1546 /* Decode an SVE CPY/DUP immediate. */
1547 int
1548 aarch64_ext_sve_asimm (const aarch64_operand *self,
1549 aarch64_opnd_info *info, const aarch64_insn code,
1550 const aarch64_inst *inst)
1551 {
1552 return (aarch64_ext_imm (self, info, code, inst)
1553 && decode_sve_aimm (info, (int8_t) info->imm.value));
1554 }
1555
1556 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1557 The fields array specifies which field to use. */
1558 int
1559 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1560 aarch64_opnd_info *info, aarch64_insn code,
1561 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1562 {
1563 if (extract_field (self->fields[0], code, 0))
1564 info->imm.value = 0x3f800000;
1565 else
1566 info->imm.value = 0x3f000000;
1567 info->imm.is_fp = TRUE;
1568 return 1;
1569 }
1570
1571 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1572 The fields array specifies which field to use. */
1573 int
1574 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1575 aarch64_opnd_info *info, aarch64_insn code,
1576 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1577 {
1578 if (extract_field (self->fields[0], code, 0))
1579 info->imm.value = 0x40000000;
1580 else
1581 info->imm.value = 0x3f000000;
1582 info->imm.is_fp = TRUE;
1583 return 1;
1584 }
1585
1586 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1587 The fields array specifies which field to use. */
1588 int
1589 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1590 aarch64_opnd_info *info, aarch64_insn code,
1591 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1592 {
1593 if (extract_field (self->fields[0], code, 0))
1594 info->imm.value = 0x3f800000;
1595 else
1596 info->imm.value = 0x0;
1597 info->imm.is_fp = TRUE;
1598 return 1;
1599 }
1600
1601 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1602 array specifies which field to use for Zn. MM is encoded in the
1603 concatenation of imm5 and SVE_tszh, with imm5 being the less
1604 significant part. */
1605 int
1606 aarch64_ext_sve_index (const aarch64_operand *self,
1607 aarch64_opnd_info *info, aarch64_insn code,
1608 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1609 {
1610 int val;
1611
1612 info->reglane.regno = extract_field (self->fields[0], code, 0);
1613 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1614 if ((val & 31) == 0)
1615 return 0;
1616 while ((val & 1) == 0)
1617 val /= 2;
1618 info->reglane.index = val / 2;
1619 return 1;
1620 }
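/* For illustration: SVE_tszh:imm5 == 0b0001010 has its lowest set bit at
   position 1, so the loop above strips one zero and the lane index is
   5 / 2 == 2.  */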
1621
1622 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1623 int
1624 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1625 aarch64_opnd_info *info, const aarch64_insn code,
1626 const aarch64_inst *inst)
1627 {
1628 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1629 return (aarch64_ext_limm (self, info, code, inst)
1630 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1631 }
1632
1633 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1634 and where MM occupies the most-significant part. The operand-dependent
1635 value specifies the number of bits in Zn. */
1636 int
1637 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1638 aarch64_opnd_info *info, aarch64_insn code,
1639 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1640 {
1641 unsigned int reg_bits = get_operand_specific_data (self);
1642 unsigned int val = extract_all_fields (self, code);
1643 info->reglane.regno = val & ((1 << reg_bits) - 1);
1644 info->reglane.index = val >> reg_bits;
1645 return 1;
1646 }
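/* For illustration: with a 5-bit register field, a combined field value of
   0b1000011 gives Zn == 3 and index == 2.  */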
1647
1648 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1649 to use for Zn. The opcode-dependent value specifies the number
1650 of registers in the list. */
1651 int
1652 aarch64_ext_sve_reglist (const aarch64_operand *self,
1653 aarch64_opnd_info *info, aarch64_insn code,
1654 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1655 {
1656 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1657 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1658 return 1;
1659 }
1660
1661 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1662 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1663 field. */
1664 int
1665 aarch64_ext_sve_scale (const aarch64_operand *self,
1666 aarch64_opnd_info *info, aarch64_insn code,
1667 const aarch64_inst *inst)
1668 {
1669 int val;
1670
1671 if (!aarch64_ext_imm (self, info, code, inst))
1672 return 0;
1673 val = extract_field (FLD_SVE_imm4, code, 0);
1674 info->shifter.kind = AARCH64_MOD_MUL;
1675 info->shifter.amount = val + 1;
1676 info->shifter.operator_present = (val != 0);
1677 info->shifter.amount_present = (val != 0);
1678 return 1;
1679 }
1680
1681 /* Return the top set bit in VALUE, which is expected to be relatively
1682 small. */
1683 static uint64_t
1684 get_top_bit (uint64_t value)
1685 {
1686 while ((value & -value) != value)
1687 value -= value & -value;
1688 return value;
1689 }
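/* For illustration: get_top_bit (0b101100) repeatedly clears the lowest set
   bit and returns 0b100000.  */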
1690
1691 /* Decode an SVE shift-left immediate. */
1692 int
1693 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1694 aarch64_opnd_info *info, const aarch64_insn code,
1695 const aarch64_inst *inst)
1696 {
1697 if (!aarch64_ext_imm (self, info, code, inst)
1698 || info->imm.value == 0)
1699 return 0;
1700
1701 info->imm.value -= get_top_bit (info->imm.value);
1702 return 1;
1703 }
1704
1705 /* Decode an SVE shift-right immediate. */
1706 int
1707 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1708 aarch64_opnd_info *info, const aarch64_insn code,
1709 const aarch64_inst *inst)
1710 {
1711 if (!aarch64_ext_imm (self, info, code, inst)
1712 || info->imm.value == 0)
1713 return 0;
1714
1715 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1716 return 1;
1717 }
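/* For illustration: a shift-right field value of 0b1011 (top set bit 8)
   decodes to 2 * 8 - 11 == #5, while the same value in the shift-left case
   above decodes to 11 - 8 == #3.  */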
1718 \f
1719 /* Bitfields that are commonly used to encode certain operands' information
1720 may be partially used as part of the base opcode in some instructions.
1721 For example, the bit 1 of the field 'size' in
1722 FCVTXN <Vb><d>, <Va><n>
1723 is actually part of the base opcode, while only size<0> is available
1724 for encoding the register type. Another example is the AdvSIMD
1725 instruction ORR (register), in which the field 'size' is also used for
1726 the base opcode, leaving only the field 'Q' available to encode the
1727 vector register arrangement specifier '8B' or '16B'.
1728
1729 This function tries to deduce the qualifier from the value of partially
1730 constrained field(s). Given the VALUE of such a field or fields, the
1731 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1732 operand encoding), the function returns the matching qualifier or
1733 AARCH64_OPND_QLF_NIL if nothing matches.
1734
1735 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1736 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1737 may end with AARCH64_OPND_QLF_NIL. */
1738
1739 static enum aarch64_opnd_qualifier
1740 get_qualifier_from_partial_encoding (aarch64_insn value,
1741 const enum aarch64_opnd_qualifier* \
1742 candidates,
1743 aarch64_insn mask)
1744 {
1745 int i;
1746 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1747 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1748 {
1749 aarch64_insn standard_value;
1750 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1751 break;
1752 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1753 if ((standard_value & mask) == (value & mask))
1754 return candidates[i];
1755 }
1756 return AARCH64_OPND_QLF_NIL;
1757 }
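
/* Illustrative sketch, not part of the original source: matching a
   partially-encoded size:Q value against candidate qualifiers.  With
   the hypothetical MASK 0x3 below (size[1] being part of the base
   opcode), the 2S and 4S candidates are told apart purely by the
   remaining size[0] and Q bits.  */

static enum aarch64_opnd_qualifier
partial_encoding_example (aarch64_insn sizeq)
{
  static const enum aarch64_opnd_qualifier candidates[] =
    { AARCH64_OPND_QLF_V_2S, AARCH64_OPND_QLF_V_4S, AARCH64_OPND_QLF_NIL };
  return get_qualifier_from_partial_encoding (sizeq, candidates, 0x3);
}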
1758
1759 /* Given a list of qualifier sequences, return all possible valid qualifiers
1760 for operand IDX in QUALIFIERS.
1761 Assume QUALIFIERS is an array whose length is large enough. */
1762
1763 static void
1764 get_operand_possible_qualifiers (int idx,
1765 const aarch64_opnd_qualifier_seq_t *list,
1766 enum aarch64_opnd_qualifier *qualifiers)
1767 {
1768 int i;
1769 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1770 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1771 break;
1772 }
1773
1774 /* Decode the size Q field for e.g. SHADD.
1775 We tag one operand with the qualifier according to the code;
1776 whether the qualifier is valid for this opcode or not is the
1777 duty of the semantic checking. */
1778
1779 static int
1780 decode_sizeq (aarch64_inst *inst)
1781 {
1782 int idx;
1783 enum aarch64_opnd_qualifier qualifier;
1784 aarch64_insn code;
1785 aarch64_insn value, mask;
1786 enum aarch64_field_kind fld_sz;
1787 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1788
1789 if (inst->opcode->iclass == asisdlse
1790 || inst->opcode->iclass == asisdlsep
1791 || inst->opcode->iclass == asisdlso
1792 || inst->opcode->iclass == asisdlsop)
1793 fld_sz = FLD_vldst_size;
1794 else
1795 fld_sz = FLD_size;
1796
1797 code = inst->value;
1798 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1799 /* Obtain the info about which bits of fields Q and size are actually
1800 available for operand encoding. Opcodes like FMAXNM and FMLA have
1801 size[1] unavailable. */
1802 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1803
1804 /* The index of the operand we are going to tag with a qualifier and the
1805 qualifier itself are deduced from the value of the size and Q fields and
1806 the possible valid qualifier lists. */
1807 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1808 DEBUG_TRACE ("key idx: %d", idx);
1809
1810 /* For most related instructions, size:Q is fully available for operand
1811 encoding. */
1812 if (mask == 0x7)
1813 {
1814 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1815 return 1;
1816 }
1817
1818 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1819 candidates);
1820 #ifdef DEBUG_AARCH64
1821 if (debug_dump)
1822 {
1823 int i;
1824 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1825 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1826 DEBUG_TRACE ("qualifier %d: %s", i,
1827 aarch64_get_qualifier_name(candidates[i]));
1828 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1829 }
1830 #endif /* DEBUG_AARCH64 */
1831
1832 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1833
1834 if (qualifier == AARCH64_OPND_QLF_NIL)
1835 return 0;
1836
1837 inst->operands[idx].qualifier = qualifier;
1838 return 1;
1839 }
1840
1841 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1842 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1843
1844 static int
1845 decode_asimd_fcvt (aarch64_inst *inst)
1846 {
1847 aarch64_field field = {0, 0};
1848 aarch64_insn value;
1849 enum aarch64_opnd_qualifier qualifier;
1850
1851 gen_sub_field (FLD_size, 0, 1, &field);
1852 value = extract_field_2 (&field, inst->value, 0);
1853 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1854 : AARCH64_OPND_QLF_V_2D;
1855 switch (inst->opcode->op)
1856 {
1857 case OP_FCVTN:
1858 case OP_FCVTN2:
1859 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1860 inst->operands[1].qualifier = qualifier;
1861 break;
1862 case OP_FCVTL:
1863 case OP_FCVTL2:
1864 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1865 inst->operands[0].qualifier = qualifier;
1866 break;
1867 default:
1868 assert (0);
1869 return 0;
1870 }
1871
1872 return 1;
1873 }
1874
1875 /* Decode size[0], i.e. bit 22, for
1876 e.g. FCVTXN <Vb><d>, <Va><n>. */
1877
1878 static int
1879 decode_asisd_fcvtxn (aarch64_inst *inst)
1880 {
1881 aarch64_field field = {0, 0};
1882 gen_sub_field (FLD_size, 0, 1, &field);
1883 if (!extract_field_2 (&field, inst->value, 0))
1884 return 0;
1885 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1886 return 1;
1887 }
1888
1889 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1890 static int
1891 decode_fcvt (aarch64_inst *inst)
1892 {
1893 enum aarch64_opnd_qualifier qualifier;
1894 aarch64_insn value;
1895 const aarch64_field field = {15, 2};
1896
1897 /* opc dstsize */
1898 value = extract_field_2 (&field, inst->value, 0);
1899 switch (value)
1900 {
1901 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1902 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1903 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1904 default: return 0;
1905 }
1906 inst->operands[0].qualifier = qualifier;
1907
1908 return 1;
1909 }
1910
1911 /* Do miscellaneous decodings that are not common enough to be driven by
1912 flags. */
1913
1914 static int
1915 do_misc_decoding (aarch64_inst *inst)
1916 {
1917 unsigned int value;
1918 switch (inst->opcode->op)
1919 {
1920 case OP_FCVT:
1921 return decode_fcvt (inst);
1922
1923 case OP_FCVTN:
1924 case OP_FCVTN2:
1925 case OP_FCVTL:
1926 case OP_FCVTL2:
1927 return decode_asimd_fcvt (inst);
1928
1929 case OP_FCVTXN_S:
1930 return decode_asisd_fcvtxn (inst);
1931
1932 case OP_MOV_P_P:
1933 case OP_MOVS_P_P:
1934 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1935 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
1936 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1937
1938 case OP_MOV_Z_P_Z:
1939 return (extract_field (FLD_SVE_Zd, inst->value, 0)
1940 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1941
1942 case OP_MOV_Z_V:
1943 /* Index must be zero. */
1944 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1945 return value > 0 && value <= 16 && value == (value & -value);
1946
1947 case OP_MOV_Z_Z:
1948 return (extract_field (FLD_SVE_Zn, inst->value, 0)
1949 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1950
1951 case OP_MOV_Z_Zi:
1952 /* Index must be nonzero. */
1953 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1954 return value > 0 && value != (value & -value);
1955
1956 case OP_MOVM_P_P_P:
1957 return (extract_field (FLD_SVE_Pd, inst->value, 0)
1958 == extract_field (FLD_SVE_Pm, inst->value, 0));
1959
1960 case OP_MOVZS_P_P_P:
1961 case OP_MOVZ_P_P_P:
1962 return (extract_field (FLD_SVE_Pn, inst->value, 0)
1963 == extract_field (FLD_SVE_Pm, inst->value, 0));
1964
1965 case OP_NOTS_P_P_P_Z:
1966 case OP_NOT_P_P_P_Z:
1967 return (extract_field (FLD_SVE_Pm, inst->value, 0)
1968 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1969
1970 default:
1971 return 0;
1972 }
1973 }
1974
1975 /* Opcodes that have fields shared by multiple operands are usually flagged
1976 with special flags. In this function, we detect such flags, decode the
1977 related field(s) and store the information in one of the related operands.
1978 The 'one' operand is not an arbitrary operand but one of the operands that
1979 can accommodate all the information that has been decoded. */
1980
1981 static int
1982 do_special_decoding (aarch64_inst *inst)
1983 {
1984 int idx;
1985 aarch64_insn value;
1986 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1987 if (inst->opcode->flags & F_COND)
1988 {
1989 value = extract_field (FLD_cond2, inst->value, 0);
1990 inst->cond = get_cond_from_value (value);
1991 }
1992 /* 'sf' field. */
1993 if (inst->opcode->flags & F_SF)
1994 {
1995 idx = select_operand_for_sf_field_coding (inst->opcode);
1996 value = extract_field (FLD_sf, inst->value, 0);
1997 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1998 if ((inst->opcode->flags & F_N)
1999 && extract_field (FLD_N, inst->value, 0) != value)
2000 return 0;
2001 }
2002 /* 'lse_sz' field. */
2003 if (inst->opcode->flags & F_LSE_SZ)
2004 {
2005 idx = select_operand_for_sf_field_coding (inst->opcode);
2006 value = extract_field (FLD_lse_sz, inst->value, 0);
2007 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2008 }
2009 /* size:Q fields. */
2010 if (inst->opcode->flags & F_SIZEQ)
2011 return decode_sizeq (inst);
2012
2013 if (inst->opcode->flags & F_FPTYPE)
2014 {
2015 idx = select_operand_for_fptype_field_coding (inst->opcode);
2016 value = extract_field (FLD_type, inst->value, 0);
2017 switch (value)
2018 {
2019 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2020 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2021 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2022 default: return 0;
2023 }
2024 }
2025
2026 if (inst->opcode->flags & F_SSIZE)
2027 {
2028 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have size[1] as part
2029 of the base opcode. */
2030 aarch64_insn mask;
2031 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2032 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2033 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2034 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2035 /* For most related instructions, the 'size' field is fully available for
2036 operand encoding. */
2037 if (mask == 0x3)
2038 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2039 else
2040 {
2041 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2042 candidates);
2043 inst->operands[idx].qualifier
2044 = get_qualifier_from_partial_encoding (value, candidates, mask);
2045 }
2046 }
2047
2048 if (inst->opcode->flags & F_T)
2049 {
2050 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2051 int num = 0;
2052 unsigned val, Q;
2053 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2054 == AARCH64_OPND_CLASS_SIMD_REG);
2055 /* imm5<3:0> q <t>
2056 0000 x reserved
2057 xxx1 0 8b
2058 xxx1 1 16b
2059 xx10 0 4h
2060 xx10 1 8h
2061 x100 0 2s
2062 x100 1 4s
2063 1000 0 reserved
2064 1000 1 2d */
2065 val = extract_field (FLD_imm5, inst->value, 0);
2066 while ((val & 0x1) == 0 && ++num <= 3)
2067 val >>= 1;
2068 if (num > 3)
2069 return 0;
2070 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2071 inst->operands[0].qualifier =
2072 get_vreg_qualifier_from_value ((num << 1) | Q);
2073 }
2074
2075 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2076 {
2077 /* Use Rt to encode in the case of e.g.
2078 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2079 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2080 if (idx == -1)
2081 {
2082 /* Otherwise use the result operand, which has to be an integer
2083 register. */
2084 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2085 == AARCH64_OPND_CLASS_INT_REG);
2086 idx = 0;
2087 }
2088 assert (idx == 0 || idx == 1);
2089 value = extract_field (FLD_Q, inst->value, 0);
2090 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2091 }
2092
2093 if (inst->opcode->flags & F_LDS_SIZE)
2094 {
2095 aarch64_field field = {0, 0};
2096 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2097 == AARCH64_OPND_CLASS_INT_REG);
2098 gen_sub_field (FLD_opc, 0, 1, &field);
2099 value = extract_field_2 (&field, inst->value, 0);
2100 inst->operands[0].qualifier
2101 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2102 }
2103
2104 /* Miscellaneous decoding; done as the last step. */
2105 if (inst->opcode->flags & F_MISC)
2106 return do_misc_decoding (inst);
2107
2108 return 1;
2109 }
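
/* Illustrative sketch, not part of the original source: the F_T
   decoding above applied to standalone field values.  The number of
   trailing zeros of imm5<3:0> selects the element size and Q the
   register width, so imm5 = 4 (x100) with Q = 1 yields the 4S
   arrangement.  */

static enum aarch64_opnd_qualifier
simd_arrangement_example (unsigned int imm5, unsigned int q)
{
  int num = 0;
  while ((imm5 & 0x1) == 0 && ++num <= 3)
    imm5 >>= 1;
  if (num > 3)
    return AARCH64_OPND_QLF_NIL;        /* Reserved encoding.  */
  return get_vreg_qualifier_from_value ((num << 1) | q);
}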
2110
2111 /* Converters that convert a real opcode instruction to its alias form. */
2112
2113 /* ROR <Wd>, <Ws>, #<shift>
2114 is equivalent to:
2115 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2116 static int
2117 convert_extr_to_ror (aarch64_inst *inst)
2118 {
2119 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2120 {
2121 copy_operand_info (inst, 2, 3);
2122 inst->operands[3].type = AARCH64_OPND_NIL;
2123 return 1;
2124 }
2125 return 0;
2126 }
2127
2128 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2129 is equivalent to:
2130 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2131 static int
2132 convert_shll_to_xtl (aarch64_inst *inst)
2133 {
2134 if (inst->operands[2].imm.value == 0)
2135 {
2136 inst->operands[2].type = AARCH64_OPND_NIL;
2137 return 1;
2138 }
2139 return 0;
2140 }
2141
2142 /* Convert
2143 UBFM <Xd>, <Xn>, #<shift>, #63.
2144 to
2145 LSR <Xd>, <Xn>, #<shift>. */
2146 static int
2147 convert_bfm_to_sr (aarch64_inst *inst)
2148 {
2149 int64_t imms, val;
2150
2151 imms = inst->operands[3].imm.value;
2152 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2153 if (imms == val)
2154 {
2155 inst->operands[3].type = AARCH64_OPND_NIL;
2156 return 1;
2157 }
2158
2159 return 0;
2160 }
2161
2162 /* Convert MOV to ORR. */
2163 static int
2164 convert_orr_to_mov (aarch64_inst *inst)
2165 {
2166 /* MOV <Vd>.<T>, <Vn>.<T>
2167 is equivalent to:
2168 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2169 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2170 {
2171 inst->operands[2].type = AARCH64_OPND_NIL;
2172 return 1;
2173 }
2174 return 0;
2175 }
2176
2177 /* When <imms> >= <immr>, the instruction written:
2178 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2179 is equivalent to:
2180 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2181
2182 static int
2183 convert_bfm_to_bfx (aarch64_inst *inst)
2184 {
2185 int64_t immr, imms;
2186
2187 immr = inst->operands[2].imm.value;
2188 imms = inst->operands[3].imm.value;
2189 if (imms >= immr)
2190 {
2191 int64_t lsb = immr;
2192 inst->operands[2].imm.value = lsb;
2193 inst->operands[3].imm.value = imms + 1 - lsb;
2194 /* The two opcodes have different qualifiers for
2195 the immediate operands; reset to help the checking. */
2196 reset_operand_qualifier (inst, 2);
2197 reset_operand_qualifier (inst, 3);
2198 return 1;
2199 }
2200
2201 return 0;
2202 }
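
/* Illustrative sketch, not part of the original source: recovering the
   BFX-style operands from the SBFM/UBFM immediates, as the converter
   above does.  For UBFM X0, X1, #4, #11 the alias form is
   UBFX X0, X1, #4, #8.  */

static void
bfx_operand_example (void)
{
  int64_t immr = 4, imms = 11;          /* Hypothetical UBFM immediates.  */
  if (imms >= immr)
    {
      int64_t lsb = immr;               /* 4.  */
      int64_t width = imms + 1 - lsb;   /* 8.  */
      (void) lsb;
      (void) width;
    }
}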
2203
2204 /* When <imms> < <immr>, the instruction written:
2205 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2206 is equivalent to:
2207 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2208
2209 static int
2210 convert_bfm_to_bfi (aarch64_inst *inst)
2211 {
2212 int64_t immr, imms, val;
2213
2214 immr = inst->operands[2].imm.value;
2215 imms = inst->operands[3].imm.value;
2216 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2217 if (imms < immr)
2218 {
2219 inst->operands[2].imm.value = (val - immr) & (val - 1);
2220 inst->operands[3].imm.value = imms + 1;
2221 /* The two opcodes have different qualifiers for
2222 the immediate operands; reset to help the checking. */
2223 reset_operand_qualifier (inst, 2);
2224 reset_operand_qualifier (inst, 3);
2225 return 1;
2226 }
2227
2228 return 0;
2229 }
2230
2231 /* The instruction written:
2232 BFC <Xd>, #<lsb>, #<width>
2233 is equivalent to:
2234 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2235
2236 static int
2237 convert_bfm_to_bfc (aarch64_inst *inst)
2238 {
2239 int64_t immr, imms, val;
2240
2241 /* Should have been assured by the base opcode value. */
2242 assert (inst->operands[1].reg.regno == 0x1f);
2243
2244 immr = inst->operands[2].imm.value;
2245 imms = inst->operands[3].imm.value;
2246 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2247 if (imms < immr)
2248 {
2249 /* Drop XZR from the second operand. */
2250 copy_operand_info (inst, 1, 2);
2251 copy_operand_info (inst, 2, 3);
2252 inst->operands[3].type = AARCH64_OPND_NIL;
2253
2254 /* Recalculate the immediates. */
2255 inst->operands[1].imm.value = (val - immr) & (val - 1);
2256 inst->operands[2].imm.value = imms + 1;
2257
2258 /* The two opcodes have different qualifiers for the operands; reset to
2259 help the checking. */
2260 reset_operand_qualifier (inst, 1);
2261 reset_operand_qualifier (inst, 2);
2262 reset_operand_qualifier (inst, 3);
2263
2264 return 1;
2265 }
2266
2267 return 0;
2268 }
2269
2270 /* The instruction written:
2271 LSL <Xd>, <Xn>, #<shift>
2272 is equivalent to:
2273 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2274
2275 static int
2276 convert_ubfm_to_lsl (aarch64_inst *inst)
2277 {
2278 int64_t immr = inst->operands[2].imm.value;
2279 int64_t imms = inst->operands[3].imm.value;
2280 int64_t val
2281 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2282
2283 if ((immr == 0 && imms == val) || immr == imms + 1)
2284 {
2285 inst->operands[3].type = AARCH64_OPND_NIL;
2286 inst->operands[2].imm.value = val - imms;
2287 return 1;
2288 }
2289
2290 return 0;
2291 }
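
/* Illustrative sketch, not part of the original source: the LSL alias
   test above in isolation.  UBFM X0, X1, #52, #51 satisfies
   immr == imms + 1 and is therefore printed as LSL X0, X1, #12,
   since 63 - 51 = 12.  */

static int
lsl_alias_example (int64_t immr, int64_t imms, int is64)
{
  int64_t val = is64 ? 63 : 31;
  if ((immr == 0 && imms == val) || immr == imms + 1)
    return (int) (val - imms);          /* The LSL shift amount.  */
  return -1;                            /* Not representable as LSL.  */
}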
2292
2293 /* CINC <Wd>, <Wn>, <cond>
2294 is equivalent to:
2295 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2296 where <cond> is not AL or NV. */
2297
2298 static int
2299 convert_from_csel (aarch64_inst *inst)
2300 {
2301 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2302 && (inst->operands[3].cond->value & 0xe) != 0xe)
2303 {
2304 copy_operand_info (inst, 2, 3);
2305 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2306 inst->operands[3].type = AARCH64_OPND_NIL;
2307 return 1;
2308 }
2309 return 0;
2310 }
2311
2312 /* CSET <Wd>, <cond>
2313 is equivalent to:
2314 CSINC <Wd>, WZR, WZR, invert(<cond>)
2315 where <cond> is not AL or NV. */
2316
2317 static int
2318 convert_csinc_to_cset (aarch64_inst *inst)
2319 {
2320 if (inst->operands[1].reg.regno == 0x1f
2321 && inst->operands[2].reg.regno == 0x1f
2322 && (inst->operands[3].cond->value & 0xe) != 0xe)
2323 {
2324 copy_operand_info (inst, 1, 3);
2325 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2326 inst->operands[3].type = AARCH64_OPND_NIL;
2327 inst->operands[2].type = AARCH64_OPND_NIL;
2328 return 1;
2329 }
2330 return 0;
2331 }
2332
2333 /* MOV <Wd>, #<imm>
2334 is equivalent to:
2335 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2336
2337 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2338 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2339 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2340 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2341 machine-instruction mnemonic must be used. */
2342
2343 static int
2344 convert_movewide_to_mov (aarch64_inst *inst)
2345 {
2346 uint64_t value = inst->operands[1].imm.value;
2347 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2348 if (value == 0 && inst->operands[1].shifter.amount != 0)
2349 return 0;
2350 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2351 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2352 value <<= inst->operands[1].shifter.amount;
2353 /* As this is an alias converter, INST->OPCODE is necessarily the opcode
2354 of the real instruction. */
2355 if (inst->opcode->op == OP_MOVN)
2356 {
2357 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2358 value = ~value;
2359 /* A MOVN has an immediate that could be encoded by MOVZ. */
2360 if (aarch64_wide_constant_p (value, is32, NULL))
2361 return 0;
2362 }
2363 inst->operands[1].imm.value = value;
2364 inst->operands[1].shifter.amount = 0;
2365 return 1;
2366 }
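
/* Illustrative sketch, not part of the original source: the immediate
   value a MOV alias would print.  MOVZ W0, #0x1234, LSL #16 becomes
   MOV W0, #0x12340000; for MOVN the shifted value is inverted first,
   and the conversion above additionally rejects the alias when the
   inverted value is itself a valid MOVZ immediate.  */

static uint64_t
mov_alias_value_example (uint64_t imm16, unsigned int shift, int is_movn)
{
  uint64_t value = imm16 << shift;      /* Apply the MOVZ/MOVN shifter.  */
  if (is_movn)
    value = ~value;                     /* MOVN moves the inverted value.  */
  return value;
}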
2367
2368 /* MOV <Wd>, #<imm>
2369 is equivalent to:
2370 ORR <Wd>, WZR, #<imm>.
2371
2372 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2373 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2374 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2375 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2376 machine-instruction mnemonic must be used. */
2377
2378 static int
2379 convert_movebitmask_to_mov (aarch64_inst *inst)
2380 {
2381 int is32;
2382 uint64_t value;
2383
2384 /* Should have been assured by the base opcode value. */
2385 assert (inst->operands[1].reg.regno == 0x1f);
2386 copy_operand_info (inst, 1, 2);
2387 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2388 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2389 value = inst->operands[1].imm.value;
2390 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2391 instruction. */
2392 if (inst->operands[0].reg.regno != 0x1f
2393 && (aarch64_wide_constant_p (value, is32, NULL)
2394 || aarch64_wide_constant_p (~value, is32, NULL)))
2395 return 0;
2396
2397 inst->operands[2].type = AARCH64_OPND_NIL;
2398 return 1;
2399 }
2400
2401 /* Some alias opcodes are disassembled by being converted from their real form.
2402 N.B. INST->OPCODE is the real opcode rather than the alias. */
2403
2404 static int
2405 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2406 {
2407 switch (alias->op)
2408 {
2409 case OP_ASR_IMM:
2410 case OP_LSR_IMM:
2411 return convert_bfm_to_sr (inst);
2412 case OP_LSL_IMM:
2413 return convert_ubfm_to_lsl (inst);
2414 case OP_CINC:
2415 case OP_CINV:
2416 case OP_CNEG:
2417 return convert_from_csel (inst);
2418 case OP_CSET:
2419 case OP_CSETM:
2420 return convert_csinc_to_cset (inst);
2421 case OP_UBFX:
2422 case OP_BFXIL:
2423 case OP_SBFX:
2424 return convert_bfm_to_bfx (inst);
2425 case OP_SBFIZ:
2426 case OP_BFI:
2427 case OP_UBFIZ:
2428 return convert_bfm_to_bfi (inst);
2429 case OP_BFC:
2430 return convert_bfm_to_bfc (inst);
2431 case OP_MOV_V:
2432 return convert_orr_to_mov (inst);
2433 case OP_MOV_IMM_WIDE:
2434 case OP_MOV_IMM_WIDEN:
2435 return convert_movewide_to_mov (inst);
2436 case OP_MOV_IMM_LOG:
2437 return convert_movebitmask_to_mov (inst);
2438 case OP_ROR_IMM:
2439 return convert_extr_to_ror (inst);
2440 case OP_SXTL:
2441 case OP_SXTL2:
2442 case OP_UXTL:
2443 case OP_UXTL2:
2444 return convert_shll_to_xtl (inst);
2445 default:
2446 return 0;
2447 }
2448 }
2449
2450 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2451 aarch64_inst *, int);
2452
2453 /* Given the instruction information in *INST, check if the instruction has
2454 any alias form that can be used to represent *INST. If the answer is yes,
2455 update *INST to be in the form of the determined alias. */
2456
2457 /* In the opcode description table, the following flags are used in opcode
2458 entries to help establish the relations between the real and alias opcodes:
2459
2460 F_ALIAS: opcode is an alias
2461 F_HAS_ALIAS: opcode has alias(es)
2462 F_P1
2463 F_P2
2464 F_P3: Disassembly preference priority 1-3 (the larger, the
2465 higher). If nothing is specified, the priority is
2466 0 by default, i.e. the lowest priority.
2467
2468 Although the relation between the machine and the alias instructions is not
2469 explicitly described, it can be easily determined from the base opcode
2470 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2471 description entries:
2472
2473 The mask of an alias opcode must be equal to or a super-set (i.e. more
2474 constrained) of that of the aliased opcode; so is the base opcode value.
2475
2476 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2477 && (opcode->mask & real->mask) == real->mask
2478 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2479 then OPCODE is an alias of, and only of, the REAL instruction
2480
2481 The alias relationship is forced to be flat-structured to keep the related
2482 algorithm simple; an opcode entry cannot carry both F_ALIAS and F_HAS_ALIAS.
2483
2484 During the disassembling, the decoding decision tree (in
2485 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2486 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2487 not specified), the disassembler will check whether any alias
2488 instruction exists for this real instruction. If there is, the disassembler
2489 will try to disassemble the 32-bit binary again using the alias's rule, or
2490 try to convert the IR to the form of the alias. In the case of multiple
2491 aliases, the aliases are tried one by one from the highest priority
2492 (currently the flag F_P3) to the lowest priority (no priority flag), and the
2493 first one that succeeds is adopted.
2494
2495 You may ask why there is a need for the conversion of IR from one form to
2496 another in handling certain aliases. This is because on one hand it avoids
2497 adding more operand code to handle unusual encoding/decoding; on the other
2498 hand, during the disassembling, the conversion is an effective approach to
2499 check the condition of an alias (as an alias may be adopted only if certain
2500 conditions are met).
2501
2502 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2503 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2504 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
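
/* Illustrative sketch, not part of the original source: the subset test
   described above as a helper.  ALIAS aliases REAL if ALIAS's mask
   covers REAL's mask and the two base opcode values agree on every bit
   that REAL constrains.  */

static bfd_boolean
is_alias_of_example (const aarch64_opcode *real, const aarch64_opcode *alias)
{
  return (opcode_has_alias (real)
	  && alias_opcode_p (alias)
	  && (alias->mask & real->mask) == real->mask
	  && (real->mask & alias->opcode) == (real->mask & real->opcode));
}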
2505
2506 static void
2507 determine_disassembling_preference (struct aarch64_inst *inst)
2508 {
2509 const aarch64_opcode *opcode;
2510 const aarch64_opcode *alias;
2511
2512 opcode = inst->opcode;
2513
2514 /* This opcode does not have an alias, so use itself. */
2515 if (!opcode_has_alias (opcode))
2516 return;
2517
2518 alias = aarch64_find_alias_opcode (opcode);
2519 assert (alias);
2520
2521 #ifdef DEBUG_AARCH64
2522 if (debug_dump)
2523 {
2524 const aarch64_opcode *tmp = alias;
2525 printf ("#### LIST ordered: ");
2526 while (tmp)
2527 {
2528 printf ("%s, ", tmp->name);
2529 tmp = aarch64_find_next_alias_opcode (tmp);
2530 }
2531 printf ("\n");
2532 }
2533 #endif /* DEBUG_AARCH64 */
2534
2535 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2536 {
2537 DEBUG_TRACE ("try %s", alias->name);
2538 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2539
2540 /* An alias can be a pseudo opcode which will never be used in the
2541 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2542 aliasing AND. */
2543 if (pseudo_opcode_p (alias))
2544 {
2545 DEBUG_TRACE ("skip pseudo %s", alias->name);
2546 continue;
2547 }
2548
2549 if ((inst->value & alias->mask) != alias->opcode)
2550 {
2551 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2552 continue;
2553 }
2554 /* No need to do any complicated transformation on operands, if the alias
2555 opcode does not have any operand. */
2556 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2557 {
2558 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2559 aarch64_replace_opcode (inst, alias);
2560 return;
2561 }
2562 if (alias->flags & F_CONV)
2563 {
2564 aarch64_inst copy;
2565 memcpy (&copy, inst, sizeof (aarch64_inst));
2566 /* ALIAS is the preference as long as the instruction can be
2567 successfully converted to the form of ALIAS. */
2568 if (convert_to_alias (&copy, alias) == 1)
2569 {
2570 aarch64_replace_opcode (&copy, alias);
2571 assert (aarch64_match_operands_constraint (&copy, NULL));
2572 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2573 memcpy (inst, &copy, sizeof (aarch64_inst));
2574 return;
2575 }
2576 }
2577 else
2578 {
2579 /* Directly decode the alias opcode. */
2580 aarch64_inst temp;
2581 memset (&temp, '\0', sizeof (aarch64_inst));
2582 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
2583 {
2584 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2585 memcpy (inst, &temp, sizeof (aarch64_inst));
2586 return;
2587 }
2588 }
2589 }
2590 }
2591
2592 /* Some instructions (including all SVE ones) use the instruction class
2593 to describe how a qualifiers_list index is represented in the instruction
2594 encoding. If INST is such an instruction, decode the appropriate fields
2595 and fill in the operand qualifiers accordingly. Return true if no
2596 problems are found. */
2597
2598 static bfd_boolean
2599 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2600 {
2601 int i, variant;
2602
2603 variant = 0;
2604 switch (inst->opcode->iclass)
2605 {
2606 case sve_cpy:
2607 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2608 break;
2609
2610 case sve_index:
2611 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2612 if ((i & 31) == 0)
2613 return FALSE;
2614 while ((i & 1) == 0)
2615 {
2616 i >>= 1;
2617 variant += 1;
2618 }
2619 break;
2620
2621 case sve_limm:
2622 /* Pick the smallest applicable element size. */
2623 if ((inst->value & 0x20600) == 0x600)
2624 variant = 0;
2625 else if ((inst->value & 0x20400) == 0x400)
2626 variant = 1;
2627 else if ((inst->value & 0x20000) == 0)
2628 variant = 2;
2629 else
2630 variant = 3;
2631 break;
2632
2633 case sve_misc:
2634 /* sve_misc instructions have only a single variant. */
2635 break;
2636
2637 case sve_movprfx:
2638 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2639 break;
2640
2641 case sve_pred_zm:
2642 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2643 break;
2644
2645 case sve_shift_pred:
2646 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2647 sve_shift:
2648 if (i == 0)
2649 return FALSE;
2650 while (i != 1)
2651 {
2652 i >>= 1;
2653 variant += 1;
2654 }
2655 break;
2656
2657 case sve_shift_unpred:
2658 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2659 goto sve_shift;
2660
2661 case sve_size_bhs:
2662 variant = extract_field (FLD_size, inst->value, 0);
2663 if (variant >= 3)
2664 return FALSE;
2665 break;
2666
2667 case sve_size_bhsd:
2668 variant = extract_field (FLD_size, inst->value, 0);
2669 break;
2670
2671 case sve_size_hsd:
2672 i = extract_field (FLD_size, inst->value, 0);
2673 if (i < 1)
2674 return FALSE;
2675 variant = i - 1;
2676 break;
2677
2678 case sve_size_sd:
2679 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2680 break;
2681
2682 default:
2683 /* No mapping between instruction class and qualifiers. */
2684 return TRUE;
2685 }
2686
2687 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2688 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2689 return TRUE;
2690 }
2691 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2692 fails, which means that CODE is not an instruction of OPCODE; otherwise
2693 return 1.
2694
2695 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2696 determined and used to disassemble CODE; this is done just before the
2697 return. */
2698
2699 static int
2700 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2701 aarch64_inst *inst, int noaliases_p)
2702 {
2703 int i;
2704
2705 DEBUG_TRACE ("enter with %s", opcode->name);
2706
2707 assert (opcode && inst);
2708
2709 /* Check the base opcode. */
2710 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2711 {
2712 DEBUG_TRACE ("base opcode match FAIL");
2713 goto decode_fail;
2714 }
2715
2716 /* Clear inst. */
2717 memset (inst, '\0', sizeof (aarch64_inst));
2718
2719 inst->opcode = opcode;
2720 inst->value = code;
2721
2722 /* Assign operand codes and indexes. */
2723 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2724 {
2725 if (opcode->operands[i] == AARCH64_OPND_NIL)
2726 break;
2727 inst->operands[i].type = opcode->operands[i];
2728 inst->operands[i].idx = i;
2729 }
2730
2731 /* Call the opcode decoder indicated by flags. */
2732 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2733 {
2734 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2735 goto decode_fail;
2736 }
2737
2738 /* Possibly use the instruction class to determine the correct
2739 qualifier. */
2740 if (!aarch64_decode_variant_using_iclass (inst))
2741 {
2742 DEBUG_TRACE ("iclass-based decoder FAIL");
2743 goto decode_fail;
2744 }
2745
2746 /* Call operand decoders. */
2747 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2748 {
2749 const aarch64_operand *opnd;
2750 enum aarch64_opnd type;
2751
2752 type = opcode->operands[i];
2753 if (type == AARCH64_OPND_NIL)
2754 break;
2755 opnd = &aarch64_operands[type];
2756 if (operand_has_extractor (opnd)
2757 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
2758 {
2759 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2760 goto decode_fail;
2761 }
2762 }
2763
2764 /* If the opcode has a verifier, then check it now. */
2765 if (opcode->verifier && ! opcode->verifier (opcode, code))
2766 {
2767 DEBUG_TRACE ("operand verifier FAIL");
2768 goto decode_fail;
2769 }
2770
2771 /* Match the qualifiers. */
2772 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2773 {
2774 /* Arriving here, the CODE has been determined as a valid instruction
2775 of OPCODE and *INST has been filled with information of this OPCODE
2776 instruction. Before the return, check if the instruction has any
2777 alias and should be disassembled in the form of its alias instead.
2778 If the answer is yes, *INST will be updated. */
2779 if (!noaliases_p)
2780 determine_disassembling_preference (inst);
2781 DEBUG_TRACE ("SUCCESS");
2782 return 1;
2783 }
2784 else
2785 {
2786 DEBUG_TRACE ("constraint matching FAIL");
2787 }
2788
2789 decode_fail:
2790 return 0;
2791 }
2792 \f
2793 /* This does some user-friendly fix-up to *INST. It currently focuses on
2794 the adjustment of qualifiers to help the printed instruction be
2795 recognized/understood more easily. */
2796
2797 static void
2798 user_friendly_fixup (aarch64_inst *inst)
2799 {
2800 switch (inst->opcode->iclass)
2801 {
2802 case testbranch:
2803 /* TBNZ Xn|Wn, #uimm6, label
2804 Test and Branch Not Zero: conditionally jumps to label if bit number
2805 uimm6 in register Xn is not zero. The bit number implies the width of
2806 the register, which may be written and should be disassembled as Wn if
2807 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2808 */
2809 if (inst->operands[1].imm.value < 32)
2810 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2811 break;
2812 default: break;
2813 }
2814 }
2815
2816 /* Decode INSN and fill *INST with the instruction information. An alias
2817 opcode may be filled in *INST if NOALIASES_P is FALSE. Return zero on
2818 success. */
2819
2820 int
2821 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2822 bfd_boolean noaliases_p)
2823 {
2824 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2825
2826 #ifdef DEBUG_AARCH64
2827 if (debug_dump)
2828 {
2829 const aarch64_opcode *tmp = opcode;
2830 printf ("\n");
2831 DEBUG_TRACE ("opcode lookup:");
2832 while (tmp != NULL)
2833 {
2834 aarch64_verbose (" %s", tmp->name);
2835 tmp = aarch64_find_next_opcode (tmp);
2836 }
2837 }
2838 #endif /* DEBUG_AARCH64 */
2839
2840 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2841 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2842 opcode field and value, apart from the difference that one of them has an
2843 extra field as part of the opcode, but such a field is used for operand
2844 encoding in other opcode(s) ('immh' in the case of the example). */
2845 while (opcode != NULL)
2846 {
2847 /* But only one opcode can be decoded successfully, as the
2848 decoding routine will check the constraints carefully. */
2849 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p) == 1)
2850 return ERR_OK;
2851 opcode = aarch64_find_next_opcode (opcode);
2852 }
2853
2854 return ERR_UND;
2855 }
2856
2857 /* Print operands. */
2858
2859 static void
2860 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2861 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2862 {
2863 int i, pcrel_p, num_printed;
2864 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2865 {
2866 char str[128];
2867 /* We rely mainly on the opcode operand info; however, we also look into
2868 the inst->operands to support the disassembling of the optional
2869 operand.
2870 The two operand codes should be the same in all cases, apart from
2871 when the operand can be optional. */
2872 if (opcode->operands[i] == AARCH64_OPND_NIL
2873 || opnds[i].type == AARCH64_OPND_NIL)
2874 break;
2875
2876 /* Generate the operand string in STR. */
2877 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
2878 &info->target);
2879
2880 /* Print the delimiter (taking account of omitted operand(s)). */
2881 if (str[0] != '\0')
2882 (*info->fprintf_func) (info->stream, "%s",
2883 num_printed++ == 0 ? "\t" : ", ");
2884
2885 /* Print the operand. */
2886 if (pcrel_p)
2887 (*info->print_address_func) (info->target, info);
2888 else
2889 (*info->fprintf_func) (info->stream, "%s", str);
2890 }
2891 }
2892
2893 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
2894
2895 static void
2896 remove_dot_suffix (char *name, const aarch64_inst *inst)
2897 {
2898 char *ptr;
2899 size_t len;
2900
2901 ptr = strchr (inst->opcode->name, '.');
2902 assert (ptr && inst->cond);
2903 len = ptr - inst->opcode->name;
2904 assert (len < 8);
2905 strncpy (name, inst->opcode->name, len);
2906 name[len] = '\0';
2907 }
2908
2909 /* Print the instruction mnemonic name. */
2910
2911 static void
2912 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2913 {
2914 if (inst->opcode->flags & F_COND)
2915 {
2916 /* For instructions that are truly conditionally executed, e.g. b.cond,
2917 prepare the full mnemonic name with the corresponding condition
2918 suffix. */
2919 char name[8];
2920
2921 remove_dot_suffix (name, inst);
2922 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2923 }
2924 else
2925 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2926 }
2927
2928 /* Decide whether we need to print a comment after the operands of
2929 instruction INST. */
2930
2931 static void
2932 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
2933 {
2934 if (inst->opcode->flags & F_COND)
2935 {
2936 char name[8];
2937 unsigned int i, num_conds;
2938
2939 remove_dot_suffix (name, inst);
2940 num_conds = ARRAY_SIZE (inst->cond->names);
2941 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
2942 (*info->fprintf_func) (info->stream, "%s %s.%s",
2943 i == 1 ? " //" : ",",
2944 name, inst->cond->names[i]);
2945 }
2946 }
2947
2948 /* Print the instruction according to *INST. */
2949
2950 static void
2951 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2952 struct disassemble_info *info)
2953 {
2954 print_mnemonic_name (inst, info);
2955 print_operands (pc, inst->opcode, inst->operands, info);
2956 print_comment (inst, info);
2957 }
2958
2959 /* Entry-point of the instruction disassembler and printer. */
2960
2961 static void
2962 print_insn_aarch64_word (bfd_vma pc,
2963 uint32_t word,
2964 struct disassemble_info *info)
2965 {
2966 static const char *err_msg[6] =
2967 {
2968 [ERR_OK] = "_",
2969 [-ERR_UND] = "undefined",
2970 [-ERR_UNP] = "unpredictable",
2971 [-ERR_NYI] = "NYI"
2972 };
2973
2974 int ret;
2975 aarch64_inst inst;
2976
2977 info->insn_info_valid = 1;
2978 info->branch_delay_insns = 0;
2979 info->data_size = 0;
2980 info->target = 0;
2981 info->target2 = 0;
2982
2983 if (info->flags & INSN_HAS_RELOC)
2984 /* If the instruction has a reloc associated with it, then
2985 the offset field in the instruction will actually be the
2986 addend for the reloc. (If we are using REL type relocs).
2987 In such cases, we can ignore the pc when computing
2988 addresses, since the addend is not currently pc-relative. */
2989 pc = 0;
2990
2991 ret = aarch64_decode_insn (word, &inst, no_aliases);
2992
2993 if (((word >> 21) & 0x3ff) == 1)
2994 {
2995 /* RESERVED for ALES. */
2996 assert (ret != ERR_OK);
2997 ret = ERR_NYI;
2998 }
2999
3000 switch (ret)
3001 {
3002 case ERR_UND:
3003 case ERR_UNP:
3004 case ERR_NYI:
3005 /* Handle undefined instructions. */
3006 info->insn_type = dis_noninsn;
3007 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
3008 word, err_msg[-ret]);
3009 break;
3010 case ERR_OK:
3011 user_friendly_fixup (&inst);
3012 print_aarch64_insn (pc, &inst, info);
3013 break;
3014 default:
3015 abort ();
3016 }
3017 }
3018
3019 /* Disallow mapping symbols ($x, $d etc) from
3020 being displayed in symbol relative addresses. */
3021
3022 bfd_boolean
3023 aarch64_symbol_is_valid (asymbol * sym,
3024 struct disassemble_info * info ATTRIBUTE_UNUSED)
3025 {
3026 const char * name;
3027
3028 if (sym == NULL)
3029 return FALSE;
3030
3031 name = bfd_asymbol_name (sym);
3032
3033 return name
3034 && (name[0] != '$'
3035 || (name[1] != 'x' && name[1] != 'd')
3036 || (name[2] != '\0' && name[2] != '.'));
3037 }
3038
3039 /* Print data bytes on INFO->STREAM. */
3040
3041 static void
3042 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3043 uint32_t word,
3044 struct disassemble_info *info)
3045 {
3046 switch (info->bytes_per_chunk)
3047 {
3048 case 1:
3049 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3050 break;
3051 case 2:
3052 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3053 break;
3054 case 4:
3055 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3056 break;
3057 default:
3058 abort ();
3059 }
3060 }
3061
3062 /* Try to infer the code or data type from a symbol.
3063 Returns nonzero if *MAP_TYPE was set. */
3064
3065 static int
3066 get_sym_code_type (struct disassemble_info *info, int n,
3067 enum map_type *map_type)
3068 {
3069 elf_symbol_type *es;
3070 unsigned int type;
3071 const char *name;
3072
3073 es = *(elf_symbol_type **)(info->symtab + n);
3074 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3075
3076 /* If the symbol has function type then use that. */
3077 if (type == STT_FUNC)
3078 {
3079 *map_type = MAP_INSN;
3080 return TRUE;
3081 }
3082
3083 /* Check for mapping symbols. */
3084 name = bfd_asymbol_name(info->symtab[n]);
3085 if (name[0] == '$'
3086 && (name[1] == 'x' || name[1] == 'd')
3087 && (name[2] == '\0' || name[2] == '.'))
3088 {
3089 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3090 return TRUE;
3091 }
3092
3093 return FALSE;
3094 }
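
/* Illustrative sketch, not part of the original source: the mapping
   symbol name forms recognised above.  "$x" and "$x.<anything>" mark
   code, "$d" and "$d.<anything>" mark data; any other name falls back
   to the STT_FUNC check.  */

static int
mapping_symbol_example (const char *name, enum map_type *map_type)
{
  if (name[0] == '$'
      && (name[1] == 'x' || name[1] == 'd')
      && (name[2] == '\0' || name[2] == '.'))
    {
      *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
      return TRUE;                      /* e.g. "$x", "$d.0".  */
    }
  return FALSE;                         /* e.g. "main", "$t".  */
}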
3095
3096 /* Entry-point of the AArch64 disassembler. */
3097
3098 int
3099 print_insn_aarch64 (bfd_vma pc,
3100 struct disassemble_info *info)
3101 {
3102 bfd_byte buffer[INSNLEN];
3103 int status;
3104 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
3105 bfd_boolean found = FALSE;
3106 unsigned int size = 4;
3107 unsigned long data;
3108
3109 if (info->disassembler_options)
3110 {
3111 set_default_aarch64_dis_options (info);
3112
3113 parse_aarch64_dis_options (info->disassembler_options);
3114
3115 /* To avoid repeated parsing of these options, we remove them here. */
3116 info->disassembler_options = NULL;
3117 }
3118
3119 /* AArch64 instructions are always little-endian. */
3120 info->endian_code = BFD_ENDIAN_LITTLE;
3121
3122 /* First check the full symtab for a mapping symbol, even if there
3123 are no usable non-mapping symbols for this address. */
3124 if (info->symtab_size != 0
3125 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3126 {
3127 enum map_type type = MAP_INSN;
3128 int last_sym = -1;
3129 bfd_vma addr;
3130 int n;
3131
3132 if (pc <= last_mapping_addr)
3133 last_mapping_sym = -1;
3134
3135 /* Start scanning at the start of the function, or wherever
3136 we finished last time. */
3137 n = info->symtab_pos + 1;
3138 if (n < last_mapping_sym)
3139 n = last_mapping_sym;
3140
3141 /* Scan up to the location being disassembled. */
3142 for (; n < info->symtab_size; n++)
3143 {
3144 addr = bfd_asymbol_value (info->symtab[n]);
3145 if (addr > pc)
3146 break;
3147 if ((info->section == NULL
3148 || info->section == info->symtab[n]->section)
3149 && get_sym_code_type (info, n, &type))
3150 {
3151 last_sym = n;
3152 found = TRUE;
3153 }
3154 }
3155
3156 if (!found)
3157 {
3158 n = info->symtab_pos;
3159 if (n < last_mapping_sym)
3160 n = last_mapping_sym;
3161
3162 /* No mapping symbol found at this address. Look backwards
3163 for a preceding one. */
3164 for (; n >= 0; n--)
3165 {
3166 if (get_sym_code_type (info, n, &type))
3167 {
3168 last_sym = n;
3169 found = TRUE;
3170 break;
3171 }
3172 }
3173 }
3174
3175 last_mapping_sym = last_sym;
3176 last_type = type;
3177
3178 /* Look a little bit ahead to see if we should print out
3179 less than four bytes of data. If there's a symbol,
3180 mapping or otherwise, after two bytes then don't
3181 print more. */
3182 if (last_type == MAP_DATA)
3183 {
3184 size = 4 - (pc & 3);
3185 for (n = last_sym + 1; n < info->symtab_size; n++)
3186 {
3187 addr = bfd_asymbol_value (info->symtab[n]);
3188 if (addr > pc)
3189 {
3190 if (addr - pc < size)
3191 size = addr - pc;
3192 break;
3193 }
3194 }
3195 /* If the next symbol is after three bytes, we need to
3196 print only part of the data, so that we can use either
3197 .byte or .short. */
3198 if (size == 3)
3199 size = (pc & 1) ? 1 : 2;
3200 }
3201 }
3202
3203 if (last_type == MAP_DATA)
3204 {
3205 /* size was set above. */
3206 info->bytes_per_chunk = size;
3207 info->display_endian = info->endian;
3208 printer = print_insn_data;
3209 }
3210 else
3211 {
3212 info->bytes_per_chunk = size = INSNLEN;
3213 info->display_endian = info->endian_code;
3214 printer = print_insn_aarch64_word;
3215 }
3216
3217 status = (*info->read_memory_func) (pc, buffer, size, info);
3218 if (status != 0)
3219 {
3220 (*info->memory_error_func) (status, pc, info);
3221 return -1;
3222 }
3223
3224 data = bfd_get_bits (buffer, size * 8,
3225 info->display_endian == BFD_ENDIAN_BIG);
3226
3227 (*printer) (pc, data, info);
3228
3229 return size;
3230 }
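
/* Illustrative sketch, not part of the original source: the data chunk
   size logic above with hypothetical values.  At pc 0x1001 in a data
   region with the next symbol at 0x1004, 4 - (pc & 3) gives 3, which
   is then reduced to 1 because pc is odd, so a single .byte is
   printed.  */

static unsigned int
data_chunk_size_example (bfd_vma pc, bfd_vma next_sym_addr)
{
  unsigned int size = 4 - (pc & 3);     /* Do not cross a word boundary.  */
  if (next_sym_addr > pc && next_sym_addr - pc < size)
    size = next_sym_addr - pc;          /* Stop at the next symbol.  */
  if (size == 3)
    size = (pc & 1) ? 1 : 2;            /* Must fit a .byte or .short.  */
  return size;
}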
3231 \f
3232 void
3233 print_aarch64_disassembler_options (FILE *stream)
3234 {
3235 fprintf (stream, _("\n\
3236 The following AARCH64 specific disassembler options are supported for use\n\
3237 with the -M switch (multiple options should be separated by commas):\n"));
3238
3239 fprintf (stream, _("\n\
3240 no-aliases Don't print instruction aliases.\n"));
3241
3242 fprintf (stream, _("\n\
3243 aliases Do print instruction aliases.\n"));
3244
3245 #ifdef DEBUG_AARCH64
3246 fprintf (stream, _("\n\
3247 debug_dump Temp switch for debug trace.\n"));
3248 #endif /* DEBUG_AARCH64 */
3249
3250 fprintf (stream, _("\n"));
3251 }