1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2014 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "dis-asm.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define ERR_OK 0
30 #define ERR_UND -1
31 #define ERR_UNP -3
32 #define ERR_NYI -5
33
34 #define INSNLEN 4
35
36 /* Cached mapping symbol state. */
37 enum map_type
38 {
39 MAP_INSN,
40 MAP_DATA
41 };
42
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
46
47 /* Other options */
48 static int no_aliases = 0; /* If set disassemble as most general inst. */
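/* The "no-aliases" / "aliases" keywords are parsed from the disassembler
   option string below; with objdump this is typically selected via a command
   line such as "objdump -d -M no-aliases".  */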
49 \f
50
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
61 {
62 no_aliases = 1;
63 return;
64 }
65
66 if (CONST_STRNEQ (option, "aliases"))
67 {
68 no_aliases = 0;
69 return;
70 }
71
72 #ifdef DEBUG_AARCH64
73 if (CONST_STRNEQ (option, "debug_dump"))
74 {
75 debug_dump = 1;
76 return;
77 }
78 #endif /* DEBUG_AARCH64 */
79
80 /* Invalid option. */
81 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
82 }
83
84 static void
85 parse_aarch64_dis_options (const char *options)
86 {
87 const char *option_end;
88
89 if (options == NULL)
90 return;
91
92 while (*options != '\0')
93 {
94 /* Skip empty options. */
95 if (*options == ',')
96 {
97 options++;
98 continue;
99 }
100
101       /* We know that *options is neither NUL nor a comma.  */
102 option_end = options + 1;
103 while (*option_end != ',' && *option_end != '\0')
104 option_end++;
105
106 parse_aarch64_dis_option (options, option_end - options);
107
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
111 }
112 }
113 \f
114 /* Functions doing the instruction disassembling. */
115
116 /* The unnamed arguments consist of the number of fields and information about
117 these fields where the VALUE will be extracted from CODE and returned.
118 MASK can be zero or the base mask of the opcode.
119
120    N.B. the fields are required to be in such an order that the most significant
121    field for VALUE comes first, e.g. the <index> in
122     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123    is encoded in H:L:M; in such cases the fields should be passed in
124    the order of H, L, M.  */
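/* For instance, the H:L:M index mentioned above is gathered further below
   (see aarch64_ext_reglane) with a call of the form
     extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);
   i.e. the most significant field, H, is passed first.  */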
125
126 static inline aarch64_insn
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
128 {
129 uint32_t num;
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
132 va_list va;
133
134 va_start (va, mask);
135 num = va_arg (va, uint32_t);
136 assert (num <= 5);
137 aarch64_insn value = 0x0;
138 while (num--)
139 {
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
144 }
145 return value;
146 }
147
148 /* Sign-extend VALUE from bit I, i.e. treat bit I as the sign bit.  */
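/* For example, sign_extend (0x100, 8) treats bit 8 as the sign bit and
   returns (int32_t) 0xffffff00, i.e. -256.  */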
149 static inline int32_t
150 sign_extend (aarch64_insn value, unsigned i)
151 {
152 uint32_t ret = value;
153
154 assert (i < 32);
155 if ((value >> i) & 0x1)
156 {
157 uint32_t val = (uint32_t)(-1) << i;
158 ret = ret | val;
159 }
160 return (int32_t) ret;
161 }
162
163 /* N.B. the following inline helper functions create a dependency on the
164 order of operand qualifier enumerators. */
165
166 /* Given VALUE, return qualifier for a general purpose register. */
167 static inline enum aarch64_opnd_qualifier
168 get_greg_qualifier_from_value (aarch64_insn value)
169 {
170 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
171 assert (value <= 0x1
172 && aarch64_get_qualifier_standard_value (qualifier) == value);
173 return qualifier;
174 }
175
176 /* Given VALUE, return qualifier for a vector register. */
177 static inline enum aarch64_opnd_qualifier
178 get_vreg_qualifier_from_value (aarch64_insn value)
179 {
180 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
181
182 assert (value <= 0x8
183 && aarch64_get_qualifier_standard_value (qualifier) == value);
184 return qualifier;
185 }
186
187 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
188 static inline enum aarch64_opnd_qualifier
189 get_sreg_qualifier_from_value (aarch64_insn value)
190 {
191 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
192
193 assert (value <= 0x4
194 && aarch64_get_qualifier_standard_value (qualifier) == value);
195 return qualifier;
196 }
197
198 /* Given the instruction in *INST, which is probably halfway through the
199    decoding, return the expected qualifier for operand
200    I if we can establish it; otherwise return
201    AARCH64_OPND_QLF_NIL.  */
202
203 static aarch64_opnd_qualifier_t
204 get_expected_qualifier (const aarch64_inst *inst, int i)
205 {
206 aarch64_opnd_qualifier_seq_t qualifiers;
207 /* Should not be called if the qualifier is known. */
208 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
209 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
210 i, qualifiers))
211 return qualifiers[i];
212 else
213 return AARCH64_OPND_QLF_NIL;
214 }
215
216 /* Operand extractors. */
217
218 int
219 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
220 const aarch64_insn code,
221 const aarch64_inst *inst ATTRIBUTE_UNUSED)
222 {
223 info->reg.regno = extract_field (self->fields[0], code, 0);
224 return 1;
225 }
226
227 /* e.g. IC <ic_op>{, <Xt>}. */
228 int
229 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
230 const aarch64_insn code,
231 const aarch64_inst *inst ATTRIBUTE_UNUSED)
232 {
233 info->reg.regno = extract_field (self->fields[0], code, 0);
234 assert (info->idx == 1
235 && (aarch64_get_operand_class (inst->operands[0].type)
236 == AARCH64_OPND_CLASS_SYSTEM));
237 /* This will make the constraint checking happy and more importantly will
238 help the disassembler determine whether this operand is optional or
239 not. */
240 info->present = inst->operands[0].sysins_op->has_xt;
241
242 return 1;
243 }
244
245 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
246 int
247 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
248 const aarch64_insn code,
249 const aarch64_inst *inst ATTRIBUTE_UNUSED)
250 {
251 /* regno */
252 info->reglane.regno = extract_field (self->fields[0], code,
253 inst->opcode->mask);
254
255 /* Index and/or type. */
256 if (inst->opcode->iclass == asisdone
257 || inst->opcode->iclass == asimdins)
258 {
259 if (info->type == AARCH64_OPND_En
260 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
261 {
262 unsigned shift;
263 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
264 assert (info->idx == 1); /* Vn */
265 aarch64_insn value = extract_field (FLD_imm4, code, 0);
266 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
267 info->qualifier = get_expected_qualifier (inst, info->idx);
268 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
269 info->reglane.index = value >> shift;
270 }
271 else
272 {
273 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
274 imm5<3:0> <V>
275 0000 RESERVED
276 xxx1 B
277 xx10 H
278 x100 S
279 1000 D */
280 int pos = -1;
281 aarch64_insn value = extract_field (FLD_imm5, code, 0);
282 while (++pos <= 3 && (value & 0x1) == 0)
283 value >>= 1;
284 if (pos > 3)
285 return 0;
286 info->qualifier = get_sreg_qualifier_from_value (pos);
287 info->reglane.index = (unsigned) (value >> 1);
288 }
289 }
290 else
291 {
292 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
293          or SQDMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].  */
294
295 /* Need information in other operand(s) to help decoding. */
296 info->qualifier = get_expected_qualifier (inst, info->idx);
297 switch (info->qualifier)
298 {
299 case AARCH64_OPND_QLF_S_H:
300 /* h:l:m */
301 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
302 FLD_M);
303 info->reglane.regno &= 0xf;
304 break;
305 case AARCH64_OPND_QLF_S_S:
306 /* h:l */
307 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
308 break;
309 case AARCH64_OPND_QLF_S_D:
310 /* H */
311 info->reglane.index = extract_field (FLD_H, code, 0);
312 break;
313 default:
314 return 0;
315 }
316 }
317
318 return 1;
319 }
320
321 int
322 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
323 const aarch64_insn code,
324 const aarch64_inst *inst ATTRIBUTE_UNUSED)
325 {
326 /* R */
327 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
328 /* len */
329 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
330 return 1;
331 }
332
333 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
334 int
335 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
336 aarch64_opnd_info *info, const aarch64_insn code,
337 const aarch64_inst *inst)
338 {
339 aarch64_insn value;
340 /* Number of elements in each structure to be loaded/stored. */
341 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
342
343 struct
344 {
345 unsigned is_reserved;
346 unsigned num_regs;
347 unsigned num_elements;
348 } data [] =
349 { {0, 4, 4},
350 {1, 4, 4},
351 {0, 4, 1},
352 {0, 4, 2},
353 {0, 3, 3},
354 {1, 3, 3},
355 {0, 3, 1},
356 {0, 1, 1},
357 {0, 2, 2},
358 {1, 2, 2},
359 {0, 2, 1},
360 };
361
362 /* Rt */
363 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
364 /* opcode */
365 value = extract_field (FLD_opcode, code, 0);
366 if (expected_num != data[value].num_elements || data[value].is_reserved)
367 return 0;
368 info->reglist.num_regs = data[value].num_regs;
369
370 return 1;
371 }
372
373 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
374 lanes instructions. */
375 int
376 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
377 aarch64_opnd_info *info, const aarch64_insn code,
378 const aarch64_inst *inst)
379 {
380 aarch64_insn value;
381
382 /* Rt */
383 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
384 /* S */
385 value = extract_field (FLD_S, code, 0);
386
387 /* Number of registers is equal to the number of elements in
388 each structure to be loaded/stored. */
389 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
390 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
391
392 /* Except when it is LD1R. */
393 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
394 info->reglist.num_regs = 2;
395
396 return 1;
397 }
398
399 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
400 load/store single element instructions. */
401 int
402 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
403 aarch64_opnd_info *info, const aarch64_insn code,
404 const aarch64_inst *inst ATTRIBUTE_UNUSED)
405 {
406 aarch64_field field = {0, 0};
407 aarch64_insn QSsize; /* fields Q:S:size. */
408 aarch64_insn opcodeh2; /* opcode<2:1> */
409
410 /* Rt */
411 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
412
413 /* Decode the index, opcode<2:1> and size. */
414 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
415 opcodeh2 = extract_field_2 (&field, code, 0);
416 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
417 switch (opcodeh2)
418 {
419 case 0x0:
420 info->qualifier = AARCH64_OPND_QLF_S_B;
421 /* Index encoded in "Q:S:size". */
422 info->reglist.index = QSsize;
423 break;
424 case 0x1:
425 info->qualifier = AARCH64_OPND_QLF_S_H;
426 /* Index encoded in "Q:S:size<1>". */
427 info->reglist.index = QSsize >> 1;
428 break;
429 case 0x2:
430 if ((QSsize & 0x1) == 0)
431 {
432 info->qualifier = AARCH64_OPND_QLF_S_S;
433 /* Index encoded in "Q:S". */
434 info->reglist.index = QSsize >> 2;
435 }
436 else
437 {
438 info->qualifier = AARCH64_OPND_QLF_S_D;
439 /* Index encoded in "Q". */
440 info->reglist.index = QSsize >> 3;
441 if (extract_field (FLD_S, code, 0))
442 /* UND */
443 return 0;
444 }
445 break;
446 default:
447 return 0;
448 }
449
450 info->reglist.has_index = 1;
451 info->reglist.num_regs = 0;
452 /* Number of registers is equal to the number of elements in
453 each structure to be loaded/stored. */
454 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
455 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
456
457 return 1;
458 }
459
460 /* Decode fields immh:immb and/or Q for e.g.
461 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
462 or SSHR <V><d>, <V><n>, #<shift>. */
463
464 int
465 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
466 aarch64_opnd_info *info, const aarch64_insn code,
467 const aarch64_inst *inst)
468 {
469 int pos;
470 aarch64_insn Q, imm, immh;
471 enum aarch64_insn_class iclass = inst->opcode->iclass;
472
473 immh = extract_field (FLD_immh, code, 0);
474 if (immh == 0)
475 return 0;
476 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
477 pos = 4;
478 /* Get highest set bit in immh. */
479 while (--pos >= 0 && (immh & 0x8) == 0)
480 immh <<= 1;
481
482 assert ((iclass == asimdshf || iclass == asisdshf)
483 && (info->type == AARCH64_OPND_IMM_VLSR
484 || info->type == AARCH64_OPND_IMM_VLSL));
485
486 if (iclass == asimdshf)
487 {
488 Q = extract_field (FLD_Q, code, 0);
489 /* immh Q <T>
490 0000 x SEE AdvSIMD modified immediate
491 0001 0 8B
492 0001 1 16B
493 001x 0 4H
494 001x 1 8H
495 01xx 0 2S
496 01xx 1 4S
497 1xxx 0 RESERVED
498 1xxx 1 2D */
499 info->qualifier =
500 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
501 }
502 else
503 info->qualifier = get_sreg_qualifier_from_value (pos);
504
505 if (info->type == AARCH64_OPND_IMM_VLSR)
506 /* immh <shift>
507 0000 SEE AdvSIMD modified immediate
508 0001 (16-UInt(immh:immb))
509 001x (32-UInt(immh:immb))
510 01xx (64-UInt(immh:immb))
511 1xxx (128-UInt(immh:immb)) */
512 info->imm.value = (16 << pos) - imm;
513 else
514 /* immh:immb
515 immh <shift>
516 0000 SEE AdvSIMD modified immediate
517 0001 (UInt(immh:immb)-8)
518 001x (UInt(immh:immb)-16)
519 01xx (UInt(immh:immb)-32)
520 1xxx (UInt(immh:immb)-64) */
521 info->imm.value = imm - (8 << pos);
522
523 return 1;
524 }
525
526 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>.  */
527 int
528 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
529 aarch64_opnd_info *info, const aarch64_insn code,
530 const aarch64_inst *inst ATTRIBUTE_UNUSED)
531 {
532 int64_t imm;
533 aarch64_insn val;
534 val = extract_field (FLD_size, code, 0);
535 switch (val)
536 {
537 case 0: imm = 8; break;
538 case 1: imm = 16; break;
539 case 2: imm = 32; break;
540 default: return 0;
541 }
542 info->imm.value = imm;
543 return 1;
544 }
545
546 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
547    The value in the field(s) will be extracted as an unsigned immediate value.  */
548 int
549 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
550 const aarch64_insn code,
551 const aarch64_inst *inst ATTRIBUTE_UNUSED)
552 {
553 int64_t imm;
554 /* Maximum of two fields to extract. */
555 assert (self->fields[2] == FLD_NIL);
556
557 if (self->fields[1] == FLD_NIL)
558 imm = extract_field (self->fields[0], code, 0);
559 else
560 /* e.g. TBZ b5:b40. */
561 imm = extract_fields (code, 0, 2, self->fields[0], self->fields[1]);
562
563 if (info->type == AARCH64_OPND_FPIMM)
564 info->imm.is_fp = 1;
565
566 if (operand_need_sign_extension (self))
567 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
568
569 if (operand_need_shift_by_two (self))
570 imm <<= 2;
571
572 if (info->type == AARCH64_OPND_ADDR_ADRP)
573 imm <<= 12;
574
575 info->imm.value = imm;
576 return 1;
577 }
578
579 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
580 int
581 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
582 const aarch64_insn code,
583 const aarch64_inst *inst ATTRIBUTE_UNUSED)
584 {
585 aarch64_ext_imm (self, info, code, inst);
586 info->shifter.kind = AARCH64_MOD_LSL;
587 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
588 return 1;
589 }
590
591 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
592 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
593 int
594 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
595 aarch64_opnd_info *info,
596 const aarch64_insn code,
597 const aarch64_inst *inst ATTRIBUTE_UNUSED)
598 {
599 uint64_t imm;
600 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
601 aarch64_field field = {0, 0};
602
603 assert (info->idx == 1);
604
605 if (info->type == AARCH64_OPND_SIMD_FPIMM)
606 info->imm.is_fp = 1;
607
608 /* a:b:c:d:e:f:g:h */
609 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
610 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
611 {
612 /* Either MOVI <Dd>, #<imm>
613 or MOVI <Vd>.2D, #<imm>.
614 <imm> is a 64-bit immediate
615 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
616 encoded in "a:b:c:d:e:f:g:h". */
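      /* For example, abcdefgh == 0b10000001 expands to the 64-bit immediate
	 0xff000000000000ff, i.e. bytes 7 and 0 all-ones.  */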
617 int i;
618 unsigned abcdefgh = imm;
619 for (imm = 0ull, i = 0; i < 8; i++)
620 if (((abcdefgh >> i) & 0x1) != 0)
621 imm |= 0xffull << (8 * i);
622 }
623 info->imm.value = imm;
624
625 /* cmode */
626 info->qualifier = get_expected_qualifier (inst, info->idx);
627 switch (info->qualifier)
628 {
629 case AARCH64_OPND_QLF_NIL:
630 /* no shift */
631 info->shifter.kind = AARCH64_MOD_NONE;
632 return 1;
633 case AARCH64_OPND_QLF_LSL:
634 /* shift zeros */
635 info->shifter.kind = AARCH64_MOD_LSL;
636 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
637 {
638 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
639 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
640 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
641 default: assert (0); return 0;
642 }
643 /* 00: 0; 01: 8; 10:16; 11:24. */
644 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
645 break;
646 case AARCH64_OPND_QLF_MSL:
647 /* shift ones */
648 info->shifter.kind = AARCH64_MOD_MSL;
649 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
650 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
651 break;
652 default:
653 assert (0);
654 return 0;
655 }
656
657 return 1;
658 }
659
660 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
661 int
662 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
663 aarch64_opnd_info *info, const aarch64_insn code,
664 const aarch64_inst *inst ATTRIBUTE_UNUSED)
665 {
666   info->imm.value = 64 - extract_field (FLD_scale, code, 0);
667 return 1;
668 }
669
670 /* Decode arithmetic immediate for e.g.
671 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
672 int
673 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
674 aarch64_opnd_info *info, const aarch64_insn code,
675 const aarch64_inst *inst ATTRIBUTE_UNUSED)
676 {
677 aarch64_insn value;
678
679 info->shifter.kind = AARCH64_MOD_LSL;
680 /* shift */
681 value = extract_field (FLD_shift, code, 0);
682 if (value >= 2)
683 return 0;
684 info->shifter.amount = value ? 12 : 0;
685 /* imm12 (unsigned) */
686 info->imm.value = extract_field (FLD_imm12, code, 0);
687
688 return 1;
689 }
690
691 /* Decode logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
692
693 int
694 aarch64_ext_limm (const aarch64_operand *self ATTRIBUTE_UNUSED,
695 aarch64_opnd_info *info, const aarch64_insn code,
696 const aarch64_inst *inst ATTRIBUTE_UNUSED)
697 {
698 uint64_t imm, mask;
699 uint32_t sf;
700 uint32_t N, R, S;
701 unsigned simd_size;
702 aarch64_insn value;
703
704 value = extract_fields (code, 0, 3, FLD_N, FLD_immr, FLD_imms);
705 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_W
706 || inst->operands[0].qualifier == AARCH64_OPND_QLF_X);
707 sf = aarch64_get_qualifier_esize (inst->operands[0].qualifier) != 4;
708
709 /* value is N:immr:imms. */
710 S = value & 0x3f;
711 R = (value >> 6) & 0x3f;
712 N = (value >> 12) & 0x1;
713
714 if (sf == 0 && N == 1)
715 return 0;
716
717   /* The immediate value is S+1 bits set to 1, left rotated by SIMDsize - R
718 (in other words, right rotated by R), then replicated. */
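  /* For example, with N == 0, immr == 1 (R) and imms == 3 (S), the element
     size is 32 and the element is 0b1111 rotated right by 1 within 32 bits,
     i.e. 0x80000007, which is then replicated according to the SIMD size.  */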
719 if (N != 0)
720 {
721 simd_size = 64;
722 mask = 0xffffffffffffffffull;
723 }
724 else
725 {
726 switch (S)
727 {
728 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
729 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
730 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
731 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
732 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
733 default: return 0;
734 }
735 mask = (1ull << simd_size) - 1;
736 /* Top bits are IGNORED. */
737 R &= simd_size - 1;
738 }
739 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
740 if (S == simd_size - 1)
741 return 0;
742 /* S+1 consecutive bits to 1. */
743 /* NOTE: S can't be 63 due to detection above. */
744 imm = (1ull << (S + 1)) - 1;
745 /* Rotate to the left by simd_size - R. */
746 if (R != 0)
747 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
748 /* Replicate the value according to SIMD size. */
749 switch (simd_size)
750 {
751     case 2: imm = (imm <<  2) | imm; /* Fall through.  */
752     case 4: imm = (imm <<  4) | imm; /* Fall through.  */
753     case 8: imm = (imm <<  8) | imm; /* Fall through.  */
754     case 16: imm = (imm << 16) | imm; /* Fall through.  */
755     case 32: imm = (imm << 32) | imm; /* Fall through.  */
756 case 64: break;
757 default: assert (0); return 0;
758 }
759
760 info->imm.value = sf ? imm : imm & 0xffffffff;
761
762 return 1;
763 }
764
765 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
766 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
767 int
768 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
769 aarch64_opnd_info *info,
770 const aarch64_insn code, const aarch64_inst *inst)
771 {
772 aarch64_insn value;
773
774 /* Rt */
775 info->reg.regno = extract_field (FLD_Rt, code, 0);
776
777 /* size */
778 value = extract_field (FLD_ldst_size, code, 0);
779 if (inst->opcode->iclass == ldstpair_indexed
780 || inst->opcode->iclass == ldstnapair_offs
781 || inst->opcode->iclass == ldstpair_off
782 || inst->opcode->iclass == loadlit)
783 {
784 enum aarch64_opnd_qualifier qualifier;
785 switch (value)
786 {
787 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
788 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
789 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
790 default: return 0;
791 }
792 info->qualifier = qualifier;
793 }
794 else
795 {
796 /* opc1:size */
797 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
798 if (value > 0x4)
799 return 0;
800 info->qualifier = get_sreg_qualifier_from_value (value);
801 }
802
803 return 1;
804 }
805
806 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
807 int
808 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
809 aarch64_opnd_info *info,
810 aarch64_insn code,
811 const aarch64_inst *inst ATTRIBUTE_UNUSED)
812 {
813 /* Rn */
814 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
815 return 1;
816 }
817
818 /* Decode the address operand for e.g.
819 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
820 int
821 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
822 aarch64_opnd_info *info,
823 aarch64_insn code, const aarch64_inst *inst)
824 {
825 aarch64_insn S, value;
826
827 /* Rn */
828 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
829 /* Rm */
830 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
831 /* option */
832 value = extract_field (FLD_option, code, 0);
833 info->shifter.kind =
834 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
835 /* Fix-up the shifter kind; although the table-driven approach is
836 efficient, it is slightly inflexible, thus needing this fix-up. */
837 if (info->shifter.kind == AARCH64_MOD_UXTX)
838 info->shifter.kind = AARCH64_MOD_LSL;
839 /* S */
840 S = extract_field (FLD_S, code, 0);
841 if (S == 0)
842 {
843 info->shifter.amount = 0;
844 info->shifter.amount_present = 0;
845 }
846 else
847 {
848 int size;
849 /* Need information in other operand(s) to help achieve the decoding
850 from 'S' field. */
851 info->qualifier = get_expected_qualifier (inst, info->idx);
852 /* Get the size of the data element that is accessed, which may be
853 different from that of the source register size, e.g. in strb/ldrb. */
854 size = aarch64_get_qualifier_esize (info->qualifier);
855 info->shifter.amount = get_logsz (size);
856 info->shifter.amount_present = 1;
857 }
858
859 return 1;
860 }
861
862 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
863 int
864 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
865 aarch64_insn code, const aarch64_inst *inst)
866 {
867 aarch64_insn imm;
868 info->qualifier = get_expected_qualifier (inst, info->idx);
869
870 /* Rn */
871 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
872 /* simm (imm9 or imm7) */
873 imm = extract_field (self->fields[0], code, 0);
874 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
875 if (self->fields[0] == FLD_imm7)
876 /* scaled immediate in ld/st pair instructions. */
877 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
878 /* qualifier */
879 if (inst->opcode->iclass == ldst_unscaled
880 || inst->opcode->iclass == ldstnapair_offs
881 || inst->opcode->iclass == ldstpair_off
882 || inst->opcode->iclass == ldst_unpriv)
883 info->addr.writeback = 0;
884 else
885 {
886 /* pre/post- index */
887 info->addr.writeback = 1;
888 if (extract_field (self->fields[1], code, 0) == 1)
889 info->addr.preind = 1;
890 else
891 info->addr.postind = 1;
892 }
893
894 return 1;
895 }
896
897 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
898 int
899 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
900 aarch64_insn code,
901 const aarch64_inst *inst ATTRIBUTE_UNUSED)
902 {
903 int shift;
904 info->qualifier = get_expected_qualifier (inst, info->idx);
905 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
906 /* Rn */
907 info->addr.base_regno = extract_field (self->fields[0], code, 0);
908 /* uimm12 */
909 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
910 return 1;
911 }
912
913 /* Decode the address operand for e.g.
914 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
915 int
916 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
917 aarch64_opnd_info *info,
918 aarch64_insn code, const aarch64_inst *inst)
919 {
920 /* The opcode dependent area stores the number of elements in
921 each structure to be loaded/stored. */
922 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
923
924 /* Rn */
925 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
926 /* Rm | #<amount> */
927 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
928 if (info->addr.offset.regno == 31)
929 {
930 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
931 	/* Special handling of loading single structure to all lanes.  */
932 info->addr.offset.imm = (is_ld1r ? 1
933 : inst->operands[0].reglist.num_regs)
934 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
935 else
936 info->addr.offset.imm = inst->operands[0].reglist.num_regs
937 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
938 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
939 }
940 else
941 info->addr.offset.is_reg = 1;
942 info->addr.writeback = 1;
943
944 return 1;
945 }
946
947 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
948 int
949 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
950 aarch64_opnd_info *info,
951 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
952 {
953 aarch64_insn value;
954 /* cond */
955 value = extract_field (FLD_cond, code, 0);
956 info->cond = get_cond_from_value (value);
957 return 1;
958 }
959
960 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
961 int
962 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
963 aarch64_opnd_info *info,
964 aarch64_insn code,
965 const aarch64_inst *inst ATTRIBUTE_UNUSED)
966 {
967 /* op0:op1:CRn:CRm:op2 */
968 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
969 FLD_CRm, FLD_op2);
970 return 1;
971 }
972
973 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
974 int
975 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
976 aarch64_opnd_info *info, aarch64_insn code,
977 const aarch64_inst *inst ATTRIBUTE_UNUSED)
978 {
979 int i;
980 /* op1:op2 */
981 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
982 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
983 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
984 return 1;
985 /* Reserved value in <pstatefield>. */
986 return 0;
987 }
988
989 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
990 int
991 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
992 aarch64_opnd_info *info,
993 aarch64_insn code,
994 const aarch64_inst *inst ATTRIBUTE_UNUSED)
995 {
996 int i;
997 aarch64_insn value;
998 const aarch64_sys_ins_reg *sysins_ops;
999 /* op0:op1:CRn:CRm:op2 */
1000 value = extract_fields (code, 0, 5,
1001 FLD_op0, FLD_op1, FLD_CRn,
1002 FLD_CRm, FLD_op2);
1003
1004 switch (info->type)
1005 {
1006 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1007 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1008 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1009 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1010 default: assert (0); return 0;
1011 }
1012
1013 for (i = 0; sysins_ops[i].template != NULL; ++i)
1014 if (sysins_ops[i].value == value)
1015 {
1016 info->sysins_op = sysins_ops + i;
1017 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1018 info->sysins_op->template,
1019 (unsigned)info->sysins_op->value,
1020 info->sysins_op->has_xt, i);
1021 return 1;
1022 }
1023
1024 return 0;
1025 }
1026
1027 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1028
1029 int
1030 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1031 aarch64_opnd_info *info,
1032 aarch64_insn code,
1033 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1034 {
1035 /* CRm */
1036 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1037 return 1;
1038 }
1039
1040 /* Decode the prefetch operation option operand for e.g.
1041 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1042
1043 int
1044 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1045 aarch64_opnd_info *info,
1046 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1047 {
1048 /* prfop in Rt */
1049 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1050 return 1;
1051 }
1052
1053 /* Decode the extended register operand for e.g.
1054 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1055 int
1056 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1057 aarch64_opnd_info *info,
1058 aarch64_insn code,
1059 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1060 {
1061 aarch64_insn value;
1062
1063 /* Rm */
1064 info->reg.regno = extract_field (FLD_Rm, code, 0);
1065 /* option */
1066 value = extract_field (FLD_option, code, 0);
1067 info->shifter.kind =
1068 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1069 /* imm3 */
1070 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1071
1072 /* This makes the constraint checking happy. */
1073 info->shifter.operator_present = 1;
1074
1075 /* Assume inst->operands[0].qualifier has been resolved. */
1076 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1077 info->qualifier = AARCH64_OPND_QLF_W;
1078 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1079 && (info->shifter.kind == AARCH64_MOD_UXTX
1080 || info->shifter.kind == AARCH64_MOD_SXTX))
1081 info->qualifier = AARCH64_OPND_QLF_X;
1082
1083 return 1;
1084 }
1085
1086 /* Decode the shifted register operand for e.g.
1087 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1088 int
1089 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1090 aarch64_opnd_info *info,
1091 aarch64_insn code,
1092 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1093 {
1094 aarch64_insn value;
1095
1096 /* Rm */
1097 info->reg.regno = extract_field (FLD_Rm, code, 0);
1098 /* shift */
1099 value = extract_field (FLD_shift, code, 0);
1100 info->shifter.kind =
1101 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1102 if (info->shifter.kind == AARCH64_MOD_ROR
1103 && inst->opcode->iclass != log_shift)
1104 /* ROR is not available for the shifted register operand in arithmetic
1105 instructions. */
1106 return 0;
1107 /* imm6 */
1108 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1109
1110 /* This makes the constraint checking happy. */
1111 info->shifter.operator_present = 1;
1112
1113 return 1;
1114 }
1115 \f
1116 /* Bitfields that are commonly used to encode certain operands' information
1117 may be partially used as part of the base opcode in some instructions.
1118 For example, the bit 1 of the field 'size' in
1119 FCVTXN <Vb><d>, <Va><n>
1120 is actually part of the base opcode, while only size<0> is available
1121 for encoding the register type. Another example is the AdvSIMD
1122 instruction ORR (register), in which the field 'size' is also used for
1123 the base opcode, leaving only the field 'Q' available to encode the
1124 vector register arrangement specifier '8B' or '16B'.
1125
1126 This function tries to deduce the qualifier from the value of partially
1127 constrained field(s). Given the VALUE of such a field or fields, the
1128 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1129 operand encoding), the function returns the matching qualifier or
1130 AARCH64_OPND_QLF_NIL if nothing matches.
1131
1132 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1133 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1134 may end with AARCH64_OPND_QLF_NIL. */
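/* For example, for the AdvSIMD ORR (register) case described above only the
   Q bit is free: with a candidate list containing the 8B and 16B arrangements
   and a MASK selecting just the Q bit, a VALUE with Q == 1 matches 16B.  */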
1135
1136 static enum aarch64_opnd_qualifier
1137 get_qualifier_from_partial_encoding (aarch64_insn value,
1138 const enum aarch64_opnd_qualifier* \
1139 candidates,
1140 aarch64_insn mask)
1141 {
1142 int i;
1143 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1144 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1145 {
1146 aarch64_insn standard_value;
1147 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1148 break;
1149 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1150 if ((standard_value & mask) == (value & mask))
1151 return candidates[i];
1152 }
1153 return AARCH64_OPND_QLF_NIL;
1154 }
1155
1156 /* Given a list of qualifier sequences, return all possible valid qualifiers
1157 for operand IDX in QUALIFIERS.
1158 Assume QUALIFIERS is an array whose length is large enough. */
1159
1160 static void
1161 get_operand_possible_qualifiers (int idx,
1162 const aarch64_opnd_qualifier_seq_t *list,
1163 enum aarch64_opnd_qualifier *qualifiers)
1164 {
1165 int i;
1166 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1167 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1168 break;
1169 }
1170
1171 /* Decode the size:Q fields for e.g. SHADD.
1172    We tag one operand with the qualifier according to the code;
1173    whether the qualifier is valid for this opcode or not is left to
1174    the semantic checking.  */
1175
1176 static int
1177 decode_sizeq (aarch64_inst *inst)
1178 {
1179 int idx;
1180 enum aarch64_opnd_qualifier qualifier;
1181 aarch64_insn code;
1182 aarch64_insn value, mask;
1183 enum aarch64_field_kind fld_sz;
1184 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1185
1186 if (inst->opcode->iclass == asisdlse
1187 || inst->opcode->iclass == asisdlsep
1188 || inst->opcode->iclass == asisdlso
1189 || inst->opcode->iclass == asisdlsop)
1190 fld_sz = FLD_vldst_size;
1191 else
1192 fld_sz = FLD_size;
1193
1194 code = inst->value;
1195 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1196   /* Obtain the info about which bits of fields Q and size are actually
1197 available for operand encoding. Opcodes like FMAXNM and FMLA have
1198 size[1] unavailable. */
1199 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1200
1201   /* The index of the operand that we are going to tag with a qualifier, and
1202      the qualifier itself, are deduced from the value of the size and Q fields
1203      and the possible valid qualifier lists.  */
1204 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1205 DEBUG_TRACE ("key idx: %d", idx);
1206
1207   /* For most related instructions, size:Q is fully available for operand
1208      encoding.  */
1209 if (mask == 0x7)
1210 {
1211 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1212 return 1;
1213 }
1214
1215 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1216 candidates);
1217 #ifdef DEBUG_AARCH64
1218 if (debug_dump)
1219 {
1220 int i;
1221 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1222 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1223 DEBUG_TRACE ("qualifier %d: %s", i,
1224 aarch64_get_qualifier_name(candidates[i]));
1225 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1226 }
1227 #endif /* DEBUG_AARCH64 */
1228
1229 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1230
1231 if (qualifier == AARCH64_OPND_QLF_NIL)
1232 return 0;
1233
1234 inst->operands[idx].qualifier = qualifier;
1235 return 1;
1236 }
1237
1238 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1239 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1240
1241 static int
1242 decode_asimd_fcvt (aarch64_inst *inst)
1243 {
1244 aarch64_field field = {0, 0};
1245 aarch64_insn value;
1246 enum aarch64_opnd_qualifier qualifier;
1247
1248 gen_sub_field (FLD_size, 0, 1, &field);
1249 value = extract_field_2 (&field, inst->value, 0);
1250 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1251 : AARCH64_OPND_QLF_V_2D;
1252 switch (inst->opcode->op)
1253 {
1254 case OP_FCVTN:
1255 case OP_FCVTN2:
1256 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1257 inst->operands[1].qualifier = qualifier;
1258 break;
1259 case OP_FCVTL:
1260 case OP_FCVTL2:
1261 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1262 inst->operands[0].qualifier = qualifier;
1263 break;
1264 default:
1265 assert (0);
1266 return 0;
1267 }
1268
1269 return 1;
1270 }
1271
1272 /* Decode size[0], i.e. bit 22, for
1273 e.g. FCVTXN <Vb><d>, <Va><n>. */
1274
1275 static int
1276 decode_asisd_fcvtxn (aarch64_inst *inst)
1277 {
1278 aarch64_field field = {0, 0};
1279 gen_sub_field (FLD_size, 0, 1, &field);
1280 if (!extract_field_2 (&field, inst->value, 0))
1281 return 0;
1282 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1283 return 1;
1284 }
1285
1286 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1287 static int
1288 decode_fcvt (aarch64_inst *inst)
1289 {
1290 enum aarch64_opnd_qualifier qualifier;
1291 aarch64_insn value;
1292 const aarch64_field field = {15, 2};
1293
1294 /* opc dstsize */
1295 value = extract_field_2 (&field, inst->value, 0);
1296 switch (value)
1297 {
1298 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1299 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1300 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1301 default: return 0;
1302 }
1303 inst->operands[0].qualifier = qualifier;
1304
1305 return 1;
1306 }
1307
1308 /* Do miscellaneous decodings that are not common enough to be driven by
1309 flags. */
1310
1311 static int
1312 do_misc_decoding (aarch64_inst *inst)
1313 {
1314 switch (inst->opcode->op)
1315 {
1316 case OP_FCVT:
1317 return decode_fcvt (inst);
1318 case OP_FCVTN:
1319 case OP_FCVTN2:
1320 case OP_FCVTL:
1321 case OP_FCVTL2:
1322 return decode_asimd_fcvt (inst);
1323 case OP_FCVTXN_S:
1324 return decode_asisd_fcvtxn (inst);
1325 default:
1326 return 0;
1327 }
1328 }
1329
1330 /* Opcodes that have fields shared by multiple operands are usually flagged
1331    with flags.  In this function, we detect such flags, decode the related
1332    field(s) and store the information in one of the related operands.  The
1333    'one' operand is not an arbitrary operand, but one of the operands that can
1334    accommodate all the information that has been decoded.  */
1335
1336 static int
1337 do_special_decoding (aarch64_inst *inst)
1338 {
1339 int idx;
1340 aarch64_insn value;
1341   /* Condition for truly conditionally executed instructions, e.g. b.cond.  */
1342 if (inst->opcode->flags & F_COND)
1343 {
1344 value = extract_field (FLD_cond2, inst->value, 0);
1345 inst->cond = get_cond_from_value (value);
1346 }
1347 /* 'sf' field. */
1348 if (inst->opcode->flags & F_SF)
1349 {
1350 idx = select_operand_for_sf_field_coding (inst->opcode);
1351 value = extract_field (FLD_sf, inst->value, 0);
1352 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1353 if ((inst->opcode->flags & F_N)
1354 && extract_field (FLD_N, inst->value, 0) != value)
1355 return 0;
1356 }
1357 /* size:Q fields. */
1358 if (inst->opcode->flags & F_SIZEQ)
1359 return decode_sizeq (inst);
1360
1361 if (inst->opcode->flags & F_FPTYPE)
1362 {
1363 idx = select_operand_for_fptype_field_coding (inst->opcode);
1364 value = extract_field (FLD_type, inst->value, 0);
1365 switch (value)
1366 {
1367 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
1368 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
1369 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
1370 default: return 0;
1371 }
1372 }
1373
1374 if (inst->opcode->flags & F_SSIZE)
1375 {
1376 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
1377 of the base opcode. */
1378 aarch64_insn mask;
1379 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1380 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1381 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
1382 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
1383       /* For most related instructions, the 'size' field is fully available for
1384 operand encoding. */
1385 if (mask == 0x3)
1386 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
1387 else
1388 {
1389 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1390 candidates);
1391 inst->operands[idx].qualifier
1392 = get_qualifier_from_partial_encoding (value, candidates, mask);
1393 }
1394 }
1395
1396 if (inst->opcode->flags & F_T)
1397 {
1398 /* Num of consecutive '0's on the right side of imm5<3:0>. */
1399 int num = 0;
1400 unsigned val, Q;
1401 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1402 == AARCH64_OPND_CLASS_SIMD_REG);
1403 /* imm5<3:0> q <t>
1404 0000 x reserved
1405 xxx1 0 8b
1406 xxx1 1 16b
1407 xx10 0 4h
1408 xx10 1 8h
1409 x100 0 2s
1410 x100 1 4s
1411 1000 0 reserved
1412 1000 1 2d */
1413 val = extract_field (FLD_imm5, inst->value, 0);
1414 while ((val & 0x1) == 0 && ++num <= 3)
1415 val >>= 1;
1416 if (num > 3)
1417 return 0;
1418 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
1419 inst->operands[0].qualifier =
1420 get_vreg_qualifier_from_value ((num << 1) | Q);
1421 }
1422
1423 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1424 {
1425 /* Use Rt to encode in the case of e.g.
1426 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1427 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1428 if (idx == -1)
1429 {
1430 	  /* Otherwise use the result operand, which has to be an integer
1431 register. */
1432 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1433 == AARCH64_OPND_CLASS_INT_REG);
1434 idx = 0;
1435 }
1436 assert (idx == 0 || idx == 1);
1437 value = extract_field (FLD_Q, inst->value, 0);
1438 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1439 }
1440
1441 if (inst->opcode->flags & F_LDS_SIZE)
1442 {
1443 aarch64_field field = {0, 0};
1444 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1445 == AARCH64_OPND_CLASS_INT_REG);
1446 gen_sub_field (FLD_opc, 0, 1, &field);
1447 value = extract_field_2 (&field, inst->value, 0);
1448 inst->operands[0].qualifier
1449 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
1450 }
1451
1452 /* Miscellaneous decoding; done as the last step. */
1453 if (inst->opcode->flags & F_MISC)
1454 return do_misc_decoding (inst);
1455
1456 return 1;
1457 }
1458
1459 /* Converters converting a real opcode instruction to its alias form. */
1460
1461 /* ROR <Wd>, <Ws>, #<shift>
1462 is equivalent to:
1463 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1464 static int
1465 convert_extr_to_ror (aarch64_inst *inst)
1466 {
1467 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1468 {
1469 copy_operand_info (inst, 2, 3);
1470 inst->operands[3].type = AARCH64_OPND_NIL;
1471 return 1;
1472 }
1473 return 0;
1474 }
1475
1476 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1477 is equivalent to:
1478 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1479 static int
1480 convert_shll_to_xtl (aarch64_inst *inst)
1481 {
1482 if (inst->operands[2].imm.value == 0)
1483 {
1484 inst->operands[2].type = AARCH64_OPND_NIL;
1485 return 1;
1486 }
1487 return 0;
1488 }
1489
1490 /* Convert
1491 UBFM <Xd>, <Xn>, #<shift>, #63.
1492 to
1493 LSR <Xd>, <Xn>, #<shift>. */
1494 static int
1495 convert_bfm_to_sr (aarch64_inst *inst)
1496 {
1497 int64_t imms, val;
1498
1499 imms = inst->operands[3].imm.value;
1500 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1501 if (imms == val)
1502 {
1503 inst->operands[3].type = AARCH64_OPND_NIL;
1504 return 1;
1505 }
1506
1507 return 0;
1508 }
1509
1510 /* Convert MOV to ORR. */
1511 static int
1512 convert_orr_to_mov (aarch64_inst *inst)
1513 {
1514 /* MOV <Vd>.<T>, <Vn>.<T>
1515 is equivalent to:
1516 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1517 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1518 {
1519 inst->operands[2].type = AARCH64_OPND_NIL;
1520 return 1;
1521 }
1522 return 0;
1523 }
1524
1525 /* When <imms> >= <immr>, the instruction written:
1526 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1527 is equivalent to:
1528 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1529
1530 static int
1531 convert_bfm_to_bfx (aarch64_inst *inst)
1532 {
1533 int64_t immr, imms;
1534
1535 immr = inst->operands[2].imm.value;
1536 imms = inst->operands[3].imm.value;
1537 if (imms >= immr)
1538 {
1539 int64_t lsb = immr;
1540 inst->operands[2].imm.value = lsb;
1541 inst->operands[3].imm.value = imms + 1 - lsb;
1542 /* The two opcodes have different qualifiers for
1543 the immediate operands; reset to help the checking. */
1544 reset_operand_qualifier (inst, 2);
1545 reset_operand_qualifier (inst, 3);
1546 return 1;
1547 }
1548
1549 return 0;
1550 }
1551
1552 /* When <imms> < <immr>, the instruction written:
1553 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1554 is equivalent to:
1555 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1556
1557 static int
1558 convert_bfm_to_bfi (aarch64_inst *inst)
1559 {
1560 int64_t immr, imms, val;
1561
1562 immr = inst->operands[2].imm.value;
1563 imms = inst->operands[3].imm.value;
1564 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
1565 if (imms < immr)
1566 {
1567 inst->operands[2].imm.value = (val - immr) & (val - 1);
1568 inst->operands[3].imm.value = imms + 1;
1569 /* The two opcodes have different qualifiers for
1570 the immediate operands; reset to help the checking. */
1571 reset_operand_qualifier (inst, 2);
1572 reset_operand_qualifier (inst, 3);
1573 return 1;
1574 }
1575
1576 return 0;
1577 }
1578
1579 /* The instruction written:
1580 LSL <Xd>, <Xn>, #<shift>
1581 is equivalent to:
1582 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1583
1584 static int
1585 convert_ubfm_to_lsl (aarch64_inst *inst)
1586 {
1587 int64_t immr = inst->operands[2].imm.value;
1588 int64_t imms = inst->operands[3].imm.value;
1589 int64_t val
1590 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1591
1592 if ((immr == 0 && imms == val) || immr == imms + 1)
1593 {
1594 inst->operands[3].type = AARCH64_OPND_NIL;
1595 inst->operands[2].imm.value = val - imms;
1596 return 1;
1597 }
1598
1599 return 0;
1600 }
1601
1602 /* CINC <Wd>, <Wn>, <cond>
1603 is equivalent to:
1604 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
1605 where <cond> is not AL or NV. */
1606
1607 static int
1608 convert_from_csel (aarch64_inst *inst)
1609 {
1610 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
1611 && (inst->operands[3].cond->value & 0xe) != 0xe)
1612 {
1613 copy_operand_info (inst, 2, 3);
1614 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
1615 inst->operands[3].type = AARCH64_OPND_NIL;
1616 return 1;
1617 }
1618 return 0;
1619 }
1620
1621 /* CSET <Wd>, <cond>
1622 is equivalent to:
1623 CSINC <Wd>, WZR, WZR, invert(<cond>)
1624 where <cond> is not AL or NV. */
1625
1626 static int
1627 convert_csinc_to_cset (aarch64_inst *inst)
1628 {
1629 if (inst->operands[1].reg.regno == 0x1f
1630 && inst->operands[2].reg.regno == 0x1f
1631 && (inst->operands[3].cond->value & 0xe) != 0xe)
1632 {
1633 copy_operand_info (inst, 1, 3);
1634 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
1635 inst->operands[3].type = AARCH64_OPND_NIL;
1636 inst->operands[2].type = AARCH64_OPND_NIL;
1637 return 1;
1638 }
1639 return 0;
1640 }
1641
1642 /* MOV <Wd>, #<imm>
1643 is equivalent to:
1644 MOVZ <Wd>, #<imm16>, LSL #<shift>.
1645
1646 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1647 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1648 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1649 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1650 machine-instruction mnemonic must be used. */
1651
1652 static int
1653 convert_movewide_to_mov (aarch64_inst *inst)
1654 {
1655 uint64_t value = inst->operands[1].imm.value;
1656 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
1657 if (value == 0 && inst->operands[1].shifter.amount != 0)
1658 return 0;
1659 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1660 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
1661 value <<= inst->operands[1].shifter.amount;
1662   /* As an alias converter, note that INST->OPCODE here
1663      is the opcode of the real instruction.  */
1664 if (inst->opcode->op == OP_MOVN)
1665 {
1666 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1667 value = ~value;
1668 /* A MOVN has an immediate that could be encoded by MOVZ. */
1669 if (aarch64_wide_constant_p (value, is32, NULL) == TRUE)
1670 return 0;
1671 }
1672 inst->operands[1].imm.value = value;
1673 inst->operands[1].shifter.amount = 0;
1674 return 1;
1675 }
1676
1677 /* MOV <Wd>, #<imm>
1678 is equivalent to:
1679 ORR <Wd>, WZR, #<imm>.
1680
1681 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1682 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1683 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1684 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1685 machine-instruction mnemonic must be used. */
1686
1687 static int
1688 convert_movebitmask_to_mov (aarch64_inst *inst)
1689 {
1690 int is32;
1691 uint64_t value;
1692
1693 /* Should have been assured by the base opcode value. */
1694 assert (inst->operands[1].reg.regno == 0x1f);
1695 copy_operand_info (inst, 1, 2);
1696 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1697 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1698 value = inst->operands[1].imm.value;
1699 /* ORR has an immediate that could be generated by a MOVZ or MOVN
1700 instruction. */
1701 if (inst->operands[0].reg.regno != 0x1f
1702 && (aarch64_wide_constant_p (value, is32, NULL) == TRUE
1703 || aarch64_wide_constant_p (~value, is32, NULL) == TRUE))
1704 return 0;
1705
1706 inst->operands[2].type = AARCH64_OPND_NIL;
1707 return 1;
1708 }
1709
1710 /* Some alias opcodes are disassembled by being converted from their real form.
1711 N.B. INST->OPCODE is the real opcode rather than the alias. */
1712
1713 static int
1714 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
1715 {
1716 switch (alias->op)
1717 {
1718 case OP_ASR_IMM:
1719 case OP_LSR_IMM:
1720 return convert_bfm_to_sr (inst);
1721 case OP_LSL_IMM:
1722 return convert_ubfm_to_lsl (inst);
1723 case OP_CINC:
1724 case OP_CINV:
1725 case OP_CNEG:
1726 return convert_from_csel (inst);
1727 case OP_CSET:
1728 case OP_CSETM:
1729 return convert_csinc_to_cset (inst);
1730 case OP_UBFX:
1731 case OP_BFXIL:
1732 case OP_SBFX:
1733 return convert_bfm_to_bfx (inst);
1734 case OP_SBFIZ:
1735 case OP_BFI:
1736 case OP_UBFIZ:
1737 return convert_bfm_to_bfi (inst);
1738 case OP_MOV_V:
1739 return convert_orr_to_mov (inst);
1740 case OP_MOV_IMM_WIDE:
1741 case OP_MOV_IMM_WIDEN:
1742 return convert_movewide_to_mov (inst);
1743 case OP_MOV_IMM_LOG:
1744 return convert_movebitmask_to_mov (inst);
1745 case OP_ROR_IMM:
1746 return convert_extr_to_ror (inst);
1747 case OP_SXTL:
1748 case OP_SXTL2:
1749 case OP_UXTL:
1750 case OP_UXTL2:
1751 return convert_shll_to_xtl (inst);
1752 default:
1753 return 0;
1754 }
1755 }
1756
1757 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
1758 aarch64_inst *, int);
1759
1760 /* Given the instruction information in *INST, check if the instruction has
1761 any alias form that can be used to represent *INST. If the answer is yes,
1762 update *INST to be in the form of the determined alias. */
1763
1764 /* In the opcode description table, the following flags are used in opcode
1765 entries to help establish the relations between the real and alias opcodes:
1766
1767 F_ALIAS: opcode is an alias
1768 F_HAS_ALIAS: opcode has alias(es)
1769 F_P1
1770 F_P2
1771 F_P3: Disassembly preference priority 1-3 (the larger the
1772 higher). If nothing is specified, it is the priority
1773 0 by default, i.e. the lowest priority.
1774
1775    Although the relation between the machine and the alias instructions is not
1776 explicitly described, it can be easily determined from the base opcode
1777 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
1778 description entries:
1779
1780 The mask of an alias opcode must be equal to or a super-set (i.e. more
1781 constrained) of that of the aliased opcode; so is the base opcode value.
1782
1783 if (opcode_has_alias (real) && alias_opcode_p (opcode)
1784 && (opcode->mask & real->mask) == real->mask
1785 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
1786 then OPCODE is an alias of, and only of, the REAL instruction
1787
1788 The alias relationship is forced flat-structured to keep related algorithm
1789 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
1790
1791    During the disassembling, the decoding decision tree (in
1792    opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
1793    if the decoding of such a machine instruction succeeds (and -Mno-aliases is
1794    not specified), the disassembler will check whether any alias
1795    instruction exists for this real instruction.  If there is, the disassembler
1796    will try to disassemble the 32-bit binary again using the alias's rule, or
1797    try to convert the IR to the form of the alias.  In the case of multiple
1798    aliases, the aliases are tried one by one from the highest priority
1799    (currently the flag F_P3) to the lowest priority (no priority flag), and the
1800    first one that succeeds is adopted.
1801
1802    You may ask why there is a need for the conversion of IR from one form to
1803    another in handling certain aliases.  This is because, on the one hand, it avoids
1804    adding more operand code to handle unusual encoding/decoding; on the other
1805    hand, during the disassembling, the conversion is an effective approach to
1806    check the condition of an alias (as an alias may be adopted only if certain
1807    conditions are met).
1808
1809 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
1810 aarch64_opcode_table and generated aarch64_find_alias_opcode and
1811 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
1812
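/* Editor's note: a minimal illustrative sketch of the alias relation
   described above; the predicate name opcode_is_alias_of is hypothetical
   and the block is not compiled.  The real, preprocessed lookup lives in
   opcodes/aarch64-dis-2.c.  */
#if 0
static bfd_boolean
opcode_is_alias_of (const aarch64_opcode *alias, const aarch64_opcode *real)
{
  return opcode_has_alias (real)
	 && alias_opcode_p (alias)
	 && (alias->mask & real->mask) == real->mask
	 && (real->mask & alias->opcode) == (real->mask & real->opcode);
}
#endif
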
1813 static void
1814 determine_disassembling_preference (struct aarch64_inst *inst)
1815 {
1816 const aarch64_opcode *opcode;
1817 const aarch64_opcode *alias;
1818
1819 opcode = inst->opcode;
1820
1821 /* This opcode does not have an alias, so use itself. */
1822 if (opcode_has_alias (opcode) == FALSE)
1823 return;
1824
1825 alias = aarch64_find_alias_opcode (opcode);
1826 assert (alias);
1827
1828 #ifdef DEBUG_AARCH64
1829 if (debug_dump)
1830 {
1831 const aarch64_opcode *tmp = alias;
1832 printf ("#### LIST ordered: ");
1833 while (tmp)
1834 {
1835 printf ("%s, ", tmp->name);
1836 tmp = aarch64_find_next_alias_opcode (tmp);
1837 }
1838 printf ("\n");
1839 }
1840 #endif /* DEBUG_AARCH64 */
1841
1842 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
1843 {
1844 DEBUG_TRACE ("try %s", alias->name);
1845 assert (alias_opcode_p (alias));
1846
1847 /* An alias can be a pseudo opcode which will never be used in the
1848 disassembly, e.g. BIC logical immediate is such a pseudo opcode
1849 aliasing AND. */
1850 if (pseudo_opcode_p (alias))
1851 {
1852 DEBUG_TRACE ("skip pseudo %s", alias->name);
1853 continue;
1854 }
1855
1856 if ((inst->value & alias->mask) != alias->opcode)
1857 {
1858 DEBUG_TRACE ("skip %s as base opcode does not match", alias->name);
1859 continue;
1860 }
1861 /* No need to do any complicated transformation on operands if the
1862 alias opcode does not have any operands. */
1863 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
1864 {
1865 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
1866 aarch64_replace_opcode (inst, alias);
1867 return;
1868 }
1869 if (alias->flags & F_CONV)
1870 {
1871 aarch64_inst copy;
1872 memcpy (&copy, inst, sizeof (aarch64_inst));
1873 /* ALIAS is the preference as long as the instruction can be
1874 successfully converted to the form of ALIAS. */
1875 if (convert_to_alias (&copy, alias) == 1)
1876 {
1877 aarch64_replace_opcode (&copy, alias);
1878 assert (aarch64_match_operands_constraint (&copy, NULL));
1879 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
1880 memcpy (inst, &copy, sizeof (aarch64_inst));
1881 return;
1882 }
1883 }
1884 else
1885 {
1886 /* Directly decode the alias opcode. */
1887 aarch64_inst temp;
1888 memset (&temp, '\0', sizeof (aarch64_inst));
1889 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
1890 {
1891 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
1892 memcpy (inst, &temp, sizeof (aarch64_inst));
1893 return;
1894 }
1895 }
1896 }
1897 }
1898
1899 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
1900 fails, which means that CODE is not an instruction of OPCODE; otherwise
1901 return 1.
1902
1903 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
1904 determined and used to disassemble CODE; this is done just before the
1905 return. */
1906
1907 static int
1908 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
1909 aarch64_inst *inst, int noaliases_p)
1910 {
1911 int i;
1912
1913 DEBUG_TRACE ("enter with %s", opcode->name);
1914
1915 assert (opcode && inst);
1916
1917 /* Check the base opcode. */
1918 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
1919 {
1920 DEBUG_TRACE ("base opcode match FAIL");
1921 goto decode_fail;
1922 }
1923
1924 /* Clear inst. */
1925 memset (inst, '\0', sizeof (aarch64_inst));
1926
1927 inst->opcode = opcode;
1928 inst->value = code;
1929
1930 /* Assign operand codes and indexes. */
1931 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1932 {
1933 if (opcode->operands[i] == AARCH64_OPND_NIL)
1934 break;
1935 inst->operands[i].type = opcode->operands[i];
1936 inst->operands[i].idx = i;
1937 }
1938
1939 /* Call the opcode decoder indicated by flags. */
1940 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
1941 {
1942 DEBUG_TRACE ("opcode flag-based decoder FAIL");
1943 goto decode_fail;
1944 }
1945
1946 /* Call operand decoders. */
1947 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1948 {
1949 const aarch64_operand *opnd;
1950 enum aarch64_opnd type;
1951 type = opcode->operands[i];
1952 if (type == AARCH64_OPND_NIL)
1953 break;
1954 opnd = &aarch64_operands[type];
1955 if (operand_has_extractor (opnd)
1956 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
1957 {
1958 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
1959 goto decode_fail;
1960 }
1961 }
1962
1963 /* Match the qualifiers. */
1964 if (aarch64_match_operands_constraint (inst, NULL) == 1)
1965 {
1966 /* Arriving here, the CODE has been determined as a valid instruction
1967 of OPCODE and *INST has been filled with information of this OPCODE
1968 instruction. Before the return, check if the instruction has any
1969 alias and should be disassembled in the form of its alias instead.
1970 If the answer is yes, *INST will be updated. */
1971 if (!noaliases_p)
1972 determine_disassembling_preference (inst);
1973 DEBUG_TRACE ("SUCCESS");
1974 return 1;
1975 }
1976 else
1977 {
1978 DEBUG_TRACE ("constraint matching FAIL");
1979 }
1980
1981 decode_fail:
1982 return 0;
1983 }
1984 \f
1985 /* This does some user-friendly fix-up to *INST. It currently focuses on
1986 adjusting qualifiers to help the printed instruction be recognized and
1987 understood more easily. */
1988
1989 static void
1990 user_friendly_fixup (aarch64_inst *inst)
1991 {
1992 switch (inst->opcode->iclass)
1993 {
1994 case testbranch:
1995 /* TBNZ Xn|Wn, #uimm6, label
1996 Test and Branch Not Zero: conditionally jumps to label if bit number
1997 uimm6 in register Xn is not zero. The bit number implies the width of
1998 the register, which may be written and should be disassembled as Wn if
1999 uimm6 is less than 32. Limited to a branch offset range of +/- 32KiB.
2000 */
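/* E.g. with bit number 3, the first operand is given the W qualifier
   here and is printed as "w0" rather than "x0".  */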
2001 if (inst->operands[1].imm.value < 32)
2002 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2003 break;
2004 default: break;
2005 }
2006 }
2007
2008 /* Decode INSN and fill in *INST the instruction information. */
2009
2010 static int
2011 disas_aarch64_insn (uint64_t pc ATTRIBUTE_UNUSED, uint32_t insn,
2012 aarch64_inst *inst)
2013 {
2014 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2015
2016 #ifdef DEBUG_AARCH64
2017 if (debug_dump)
2018 {
2019 const aarch64_opcode *tmp = opcode;
2020 printf ("\n");
2021 DEBUG_TRACE ("opcode lookup:");
2022 while (tmp != NULL)
2023 {
2024 aarch64_verbose (" %s", tmp->name);
2025 tmp = aarch64_find_next_opcode (tmp);
2026 }
2027 }
2028 #endif /* DEBUG_AARCH64 */
2029
2030 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2031 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2032 opcode field and value; the difference is that one of them has an extra
2033 field as part of the opcode, whereas that field is used for operand
2034 encoding in the other opcode(s) ('immh' in the case of the example). */
2035 while (opcode != NULL)
2036 {
2037 /* But only one opcode can be decoded successfully, as the decoding
2038 routine will check the constraints carefully. */
2039 if (aarch64_opcode_decode (opcode, insn, inst, no_aliases) == 1)
2040 return ERR_OK;
2041 opcode = aarch64_find_next_opcode (opcode);
2042 }
2043
2044 return ERR_UND;
2045 }
2046
2047 /* Print operands. */
2048
2049 static void
2050 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2051 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2052 {
2053 int i, pcrel_p, num_printed;
2054 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2055 {
2056 const size_t size = 128;
2057 char str[size];
2058 /* We rely primarily on the opcode's operand info; however, we also
2059 look into inst->operands to support the disassembly of the optional
2060 operand.
2061 The two operand codes should be the same in all cases, apart from
2062 when the operand is optional. */
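/* E.g. the register operand of RET is optional and defaults to x30;
   when it has that default value, no operand string is generated and
   it is omitted from the output.  */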
2063 if (opcode->operands[i] == AARCH64_OPND_NIL
2064 || opnds[i].type == AARCH64_OPND_NIL)
2065 break;
2066
2067 /* Generate the operand string in STR. */
2068 aarch64_print_operand (str, size, pc, opcode, opnds, i, &pcrel_p,
2069 &info->target);
2070
2071 /* Print the delimiter (taking account of omitted operand(s)). */
2072 if (str[0] != '\0')
2073 (*info->fprintf_func) (info->stream, "%s",
2074 num_printed++ == 0 ? "\t" : ", ");
2075
2076 /* Print the operand. */
2077 if (pcrel_p)
2078 (*info->print_address_func) (info->target, info);
2079 else
2080 (*info->fprintf_func) (info->stream, "%s", str);
2081 }
2082 }
2083
2084 /* Print the instruction mnemonic name. */
2085
2086 static void
2087 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2088 {
2089 if (inst->opcode->flags & F_COND)
2090 {
2091 /* For instructions that are truly conditionally executed, e.g. b.cond,
2092 prepare the full mnemonic name with the corresponding condition
2093 suffix. */
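/* E.g. for a conditional branch with condition EQ, the mnemonic is
   printed as "b.eq".  */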
2094 char name[8], *ptr;
2095 size_t len;
2096
2097 ptr = strchr (inst->opcode->name, '.');
2098 assert (ptr && inst->cond);
2099 len = ptr - inst->opcode->name;
2100 assert (len < 8);
2101 strncpy (name, inst->opcode->name, len);
2102 name [len] = '\0';
2103 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2104 }
2105 else
2106 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2107 }
2108
2109 /* Print the instruction according to *INST. */
2110
2111 static void
2112 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2113 struct disassemble_info *info)
2114 {
2115 print_mnemonic_name (inst, info);
2116 print_operands (pc, inst->opcode, inst->operands, info);
2117 }
2118
2119 /* Entry-point of the instruction disassembler and printer. */
2120
2121 static void
2122 print_insn_aarch64_word (bfd_vma pc,
2123 uint32_t word,
2124 struct disassemble_info *info)
2125 {
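/* Error strings indexed by the negated ERR_* codes defined at the top of
   this file, so that err_msg[-ret] below maps a failing return value to
   its description.  */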
2126 static const char *err_msg[6] =
2127 {
2128 [ERR_OK] = "_",
2129 [-ERR_UND] = "undefined",
2130 [-ERR_UNP] = "unpredictable",
2131 [-ERR_NYI] = "NYI"
2132 };
2133
2134 int ret;
2135 aarch64_inst inst;
2136
2137 info->insn_info_valid = 1;
2138 info->branch_delay_insns = 0;
2139 info->data_size = 0;
2140 info->target = 0;
2141 info->target2 = 0;
2142
2143 if (info->flags & INSN_HAS_RELOC)
2144 /* If the instruction has a reloc associated with it, then
2145 the offset field in the instruction will actually be the
2146 addend for the reloc. (If we are using REL type relocs).
2147 In such cases, we can ignore the pc when computing
2148 addresses, since the addend is not currently pc-relative. */
2149 pc = 0;
2150
2151 ret = disas_aarch64_insn (pc, word, &inst);
2152
2153 if (((word >> 21) & 0x3ff) == 1)
2154 {
2155 /* RESERVED for ALES. */
2156 assert (ret != ERR_OK);
2157 ret = ERR_NYI;
2158 }
2159
2160 switch (ret)
2161 {
2162 case ERR_UND:
2163 case ERR_UNP:
2164 case ERR_NYI:
2165 /* Handle undefined instructions. */
2166 info->insn_type = dis_noninsn;
2167 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
2168 word, err_msg[-ret]);
2169 break;
2170 case ERR_OK:
2171 user_friendly_fixup (&inst);
2172 print_aarch64_insn (pc, &inst, info);
2173 break;
2174 default:
2175 abort ();
2176 }
2177 }
2178
2179 /* Prevent mapping symbols ($x, $d, etc.) from
2180 being displayed in symbol relative addresses. */
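/* E.g. "$x", "$d" and "$x.123" are treated as mapping symbols and
   rejected; ordinary symbol names are accepted.  */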
2181
2182 bfd_boolean
2183 aarch64_symbol_is_valid (asymbol * sym,
2184 struct disassemble_info * info ATTRIBUTE_UNUSED)
2185 {
2186 const char * name;
2187
2188 if (sym == NULL)
2189 return FALSE;
2190
2191 name = bfd_asymbol_name (sym);
2192
2193 return name
2194 && (name[0] != '$'
2195 || (name[1] != 'x' && name[1] != 'd')
2196 || (name[2] != '\0' && name[2] != '.'));
2197 }
2198
2199 /* Print data bytes on INFO->STREAM. */
2200
2201 static void
2202 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
2203 uint32_t word,
2204 struct disassemble_info *info)
2205 {
2206 switch (info->bytes_per_chunk)
2207 {
2208 case 1:
2209 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
2210 break;
2211 case 2:
2212 info->fprintf_func (info->stream, ".short\t0x%04x", word);
2213 break;
2214 case 4:
2215 info->fprintf_func (info->stream, ".word\t0x%08x", word);
2216 break;
2217 default:
2218 abort ();
2219 }
2220 }
2221
2222 /* Try to infer the code or data type from a symbol.
2223 Returns nonzero if *MAP_TYPE was set. */
2224
2225 static int
2226 get_sym_code_type (struct disassemble_info *info, int n,
2227 enum map_type *map_type)
2228 {
2229 elf_symbol_type *es;
2230 unsigned int type;
2231 const char *name;
2232
2233 es = *(elf_symbol_type **)(info->symtab + n);
2234 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
2235
2236 /* If the symbol has function type then use that. */
2237 if (type == STT_FUNC)
2238 {
2239 *map_type = MAP_INSN;
2240 return TRUE;
2241 }
2242
2243 /* Check for mapping symbols. */
2244 name = bfd_asymbol_name(info->symtab[n]);
2245 if (name[0] == '$'
2246 && (name[1] == 'x' || name[1] == 'd')
2247 && (name[2] == '\0' || name[2] == '.'))
2248 {
2249 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
2250 return TRUE;
2251 }
2252
2253 return FALSE;
2254 }
2255
2256 /* Entry-point of the AArch64 disassembler. */
2257
2258 int
2259 print_insn_aarch64 (bfd_vma pc,
2260 struct disassemble_info *info)
2261 {
2262 bfd_byte buffer[INSNLEN];
2263 int status;
2264 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
2265 bfd_boolean found = FALSE;
2266 unsigned int size = 4;
2267 unsigned long data;
2268
2269 if (info->disassembler_options)
2270 {
2271 set_default_aarch64_dis_options (info);
2272
2273 parse_aarch64_dis_options (info->disassembler_options);
2274
2275 /* To avoid repeated parsing of these options, we remove them here. */
2276 info->disassembler_options = NULL;
2277 }
2278
2279 /* AArch64 instructions are always little-endian. */
2280 info->endian_code = BFD_ENDIAN_LITTLE;
2281
2282 /* First check the full symtab for a mapping symbol, even if there
2283 are no usable non-mapping symbols for this address. */
2284 if (info->symtab_size != 0
2285 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
2286 {
2287 enum map_type type = MAP_INSN;
2288 int last_sym = -1;
2289 bfd_vma addr;
2290 int n;
2291
2292 if (pc <= last_mapping_addr)
2293 last_mapping_sym = -1;
2294
2295 /* Start scanning at the start of the function, or wherever
2296 we finished last time. */
2297 n = info->symtab_pos + 1;
2298 if (n < last_mapping_sym)
2299 n = last_mapping_sym;
2300
2301 /* Scan up to the location being disassembled. */
2302 for (; n < info->symtab_size; n++)
2303 {
2304 addr = bfd_asymbol_value (info->symtab[n]);
2305 if (addr > pc)
2306 break;
2307 if ((info->section == NULL
2308 || info->section == info->symtab[n]->section)
2309 && get_sym_code_type (info, n, &type))
2310 {
2311 last_sym = n;
2312 found = TRUE;
2313 }
2314 }
2315
2316 if (!found)
2317 {
2318 n = info->symtab_pos;
2319 if (n < last_mapping_sym)
2320 n = last_mapping_sym;
2321
2322 /* No mapping symbol found at this address. Look backwards
2323 for a preceding one. */
2324 for (; n >= 0; n--)
2325 {
2326 if (get_sym_code_type (info, n, &type))
2327 {
2328 last_sym = n;
2329 found = TRUE;
2330 break;
2331 }
2332 }
2333 }
2334
2335 last_mapping_sym = last_sym;
2336 last_type = type;
2337
2338 /* Look a little bit ahead to see if we should print out
2339 less than four bytes of data. If there's a symbol,
2340 mapping or otherwise, after two bytes then don't
2341 print more. */
2342 if (last_type == MAP_DATA)
2343 {
2344 size = 4 - (pc & 3);
2345 for (n = last_sym + 1; n < info->symtab_size; n++)
2346 {
2347 addr = bfd_asymbol_value (info->symtab[n]);
2348 if (addr > pc)
2349 {
2350 if (addr - pc < size)
2351 size = addr - pc;
2352 break;
2353 }
2354 }
2355 /* If the next symbol is after three bytes, we need to
2356 print only part of the data, so that we can use either
2357 .byte or .short. */
2358 if (size == 3)
2359 size = (pc & 1) ? 1 : 2;
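/* The caller advances by the size returned below, so any remaining
   data bytes are printed on a subsequent call.  */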
2360 }
2361 }
2362
2363 if (last_type == MAP_DATA)
2364 {
2365 /* size was set above. */
2366 info->bytes_per_chunk = size;
2367 info->display_endian = info->endian;
2368 printer = print_insn_data;
2369 }
2370 else
2371 {
2372 info->bytes_per_chunk = size = INSNLEN;
2373 info->display_endian = info->endian_code;
2374 printer = print_insn_aarch64_word;
2375 }
2376
2377 status = (*info->read_memory_func) (pc, buffer, size, info);
2378 if (status != 0)
2379 {
2380 (*info->memory_error_func) (status, pc, info);
2381 return -1;
2382 }
2383
2384 data = bfd_get_bits (buffer, size * 8,
2385 info->display_endian == BFD_ENDIAN_BIG);
2386
2387 (*printer) (pc, data, info);
2388
2389 return size;
2390 }
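
/* Editor's note: a minimal, illustrative sketch (not compiled) of how a
   client could drive print_insn_aarch64 on a single 32-bit word using the
   buffer helpers from dis-asm.h; the function name disassemble_word is
   hypothetical and error handling is omitted.  */
#if 0
static void
disassemble_word (uint32_t word)
{
  struct disassemble_info info;
  bfd_byte buf[INSNLEN];

  /* init_disassemble_info installs buffer_read_memory as the default
     read_memory_func.  */
  init_disassemble_info (&info, stdout, (fprintf_ftype) fprintf);
  info.arch = bfd_arch_aarch64;
  info.buffer = buf;
  info.buffer_vma = 0;
  info.buffer_length = INSNLEN;
  bfd_putl32 (word, buf);

  print_insn_aarch64 (0, &info);
  (*info.fprintf_func) (info.stream, "\n");
}
#endif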
2391 \f
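/* These options are passed via objdump's -M switch, e.g.
   "objdump -d -M no-aliases file.o".  */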
2392 void
2393 print_aarch64_disassembler_options (FILE *stream)
2394 {
2395 fprintf (stream, _("\n\
2396 The following AARCH64 specific disassembler options are supported for use\n\
2397 with the -M switch (multiple options should be separated by commas):\n"));
2398
2399 fprintf (stream, _("\n\
2400 no-aliases Don't print instruction aliases.\n"));
2401
2402 fprintf (stream, _("\n\
2403 aliases Do print instruction aliases.\n"));
2404
2405 #ifdef DEBUG_AARCH64
2406 fprintf (stream, _("\n\
2407 debug_dump Temp switch for debug trace.\n"));
2408 #endif /* DEBUG_AARCH64 */
2409
2410 fprintf (stream, _("\n"));
2411 }