1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2014 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "dis-asm.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define ERR_OK 0
30 #define ERR_UND -1
31 #define ERR_UNP -3
32 #define ERR_NYI -5
33
34 #define INSNLEN 4
35
36 /* Cached mapping symbol state. */
37 enum map_type
38 {
39 MAP_INSN,
40 MAP_DATA
41 };
42
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
46
47 /* Other options */
48 static int no_aliases = 0; /* If set disassemble as most general inst. */
49 \f
50
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
61 {
62 no_aliases = 1;
63 return;
64 }
65
66 if (CONST_STRNEQ (option, "aliases"))
67 {
68 no_aliases = 0;
69 return;
70 }
71
72 #ifdef DEBUG_AARCH64
73 if (CONST_STRNEQ (option, "debug_dump"))
74 {
75 debug_dump = 1;
76 return;
77 }
78 #endif /* DEBUG_AARCH64 */
79
80 /* Invalid option. */
81 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
82 }
83
84 static void
85 parse_aarch64_dis_options (const char *options)
86 {
87 const char *option_end;
88
89 if (options == NULL)
90 return;
91
92 while (*options != '\0')
93 {
94 /* Skip empty options. */
95 if (*options == ',')
96 {
97 options++;
98 continue;
99 }
100
101       /* We know that *options is neither NUL nor a comma. */
102 option_end = options + 1;
103 while (*option_end != ',' && *option_end != '\0')
104 option_end++;
105
106 parse_aarch64_dis_option (options, option_end - options);
107
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
111 }
112 }
113 \f
114 /* Functions doing the instruction disassembling. */
115
116 /* The unnamed arguments consist of the number of fields and information about
117 these fields where the VALUE will be extracted from CODE and returned.
118 MASK can be zero or the base mask of the opcode.
119
120    N.B. the fields are required to be in such an order that the most significant
121    field for VALUE comes first, e.g. the <index> in
122     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123    is encoded in H:L:M; in such cases, the fields H:L:M should be passed in
124    the order of H, L, M. */
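/* For example (an illustrative sketch mirroring the call sites further below,
   e.g. in aarch64_ext_reglane), the by-element <index> encoded in H:L:M can
   be retrieved with

     aarch64_insn index = extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);

   which, the three fields each being one bit wide, yields
   (H << 2) | (L << 1) | M.  */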
125
126 static inline aarch64_insn
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
128 {
129 uint32_t num;
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
132 va_list va;
133
134 va_start (va, mask);
135 num = va_arg (va, uint32_t);
136 assert (num <= 5);
137 aarch64_insn value = 0x0;
138 while (num--)
139 {
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
144 }
145 return value;
146 }
147
148 /* Sign-extend VALUE, treating bit I as the sign bit. */
149 static inline int32_t
150 sign_extend (aarch64_insn value, unsigned i)
151 {
152 uint32_t ret = value;
153
154 assert (i < 32);
155 if ((value >> i) & 0x1)
156 {
157 uint32_t val = (uint32_t)(-1) << i;
158 ret = ret | val;
159 }
160 return (int32_t) ret;
161 }
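/* For instance (illustrative), a 9-bit simm9 field holding 0x1ff decodes to
   -1: sign_extend (0x1ff, 8) == -1, bit 8 being the sign bit.  */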
162
163 /* N.B. the following inline helper functions create a dependency on the
164 order of operand qualifier enumerators. */
165
166 /* Given VALUE, return qualifier for a general purpose register. */
167 static inline enum aarch64_opnd_qualifier
168 get_greg_qualifier_from_value (aarch64_insn value)
169 {
170 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
171 assert (value <= 0x1
172 && aarch64_get_qualifier_standard_value (qualifier) == value);
173 return qualifier;
174 }
175
176 /* Given VALUE, return qualifier for a vector register. */
177 static inline enum aarch64_opnd_qualifier
178 get_vreg_qualifier_from_value (aarch64_insn value)
179 {
180 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
181
182 assert (value <= 0x8
183 && aarch64_get_qualifier_standard_value (qualifier) == value);
184 return qualifier;
185 }
186
187 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
188 static inline enum aarch64_opnd_qualifier
189 get_sreg_qualifier_from_value (aarch64_insn value)
190 {
191 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
192
193 assert (value <= 0x4
194 && aarch64_get_qualifier_standard_value (qualifier) == value);
195 return qualifier;
196 }
197
198 /* Given the instruction in *INST, which is probably half way through the
199    decoding, our caller wants to know the expected qualifier for operand
200    I. Return such a qualifier if we can establish it; otherwise return
201    AARCH64_OPND_QLF_NIL. */
202
203 static aarch64_opnd_qualifier_t
204 get_expected_qualifier (const aarch64_inst *inst, int i)
205 {
206 aarch64_opnd_qualifier_seq_t qualifiers;
207 /* Should not be called if the qualifier is known. */
208 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
209 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
210 i, qualifiers))
211 return qualifiers[i];
212 else
213 return AARCH64_OPND_QLF_NIL;
214 }
215
216 /* Operand extractors. */
217
218 int
219 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
220 const aarch64_insn code,
221 const aarch64_inst *inst ATTRIBUTE_UNUSED)
222 {
223 info->reg.regno = extract_field (self->fields[0], code, 0);
224 return 1;
225 }
226
227 /* e.g. IC <ic_op>{, <Xt>}. */
228 int
229 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
230 const aarch64_insn code,
231 const aarch64_inst *inst ATTRIBUTE_UNUSED)
232 {
233 info->reg.regno = extract_field (self->fields[0], code, 0);
234 assert (info->idx == 1
235 && (aarch64_get_operand_class (inst->operands[0].type)
236 == AARCH64_OPND_CLASS_SYSTEM));
237 /* This will make the constraint checking happy and more importantly will
238 help the disassembler determine whether this operand is optional or
239 not. */
240 info->present = inst->operands[0].sysins_op->has_xt;
241
242 return 1;
243 }
244
245 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
246 int
247 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
248 const aarch64_insn code,
249 const aarch64_inst *inst ATTRIBUTE_UNUSED)
250 {
251 /* regno */
252 info->reglane.regno = extract_field (self->fields[0], code,
253 inst->opcode->mask);
254
255 /* Index and/or type. */
256 if (inst->opcode->iclass == asisdone
257 || inst->opcode->iclass == asimdins)
258 {
259 if (info->type == AARCH64_OPND_En
260 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
261 {
262 unsigned shift;
263 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
264 assert (info->idx == 1); /* Vn */
265 aarch64_insn value = extract_field (FLD_imm4, code, 0);
266 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
267 info->qualifier = get_expected_qualifier (inst, info->idx);
268 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
269 info->reglane.index = value >> shift;
270 }
271 else
272 {
273 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
274 imm5<3:0> <V>
275 0000 RESERVED
276 xxx1 B
277 xx10 H
278 x100 S
279 1000 D */
280 int pos = -1;
281 aarch64_insn value = extract_field (FLD_imm5, code, 0);
282 while (++pos <= 3 && (value & 0x1) == 0)
283 value >>= 1;
284 if (pos > 3)
285 return 0;
286 info->qualifier = get_sreg_qualifier_from_value (pos);
287 info->reglane.index = (unsigned) (value >> 1);
288 }
289 }
290 else
291 {
292       /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
293          or SQDMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>]. */
294
295 /* Need information in other operand(s) to help decoding. */
296 info->qualifier = get_expected_qualifier (inst, info->idx);
297 switch (info->qualifier)
298 {
299 case AARCH64_OPND_QLF_S_H:
300 /* h:l:m */
301 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
302 FLD_M);
303 info->reglane.regno &= 0xf;
304 break;
305 case AARCH64_OPND_QLF_S_S:
306 /* h:l */
307 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
308 break;
309 case AARCH64_OPND_QLF_S_D:
310 /* H */
311 info->reglane.index = extract_field (FLD_H, code, 0);
312 break;
313 default:
314 return 0;
315 }
316 }
317
318 return 1;
319 }
320
321 int
322 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
323 const aarch64_insn code,
324 const aarch64_inst *inst ATTRIBUTE_UNUSED)
325 {
326 /* R */
327 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
328 /* len */
329 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
330 return 1;
331 }
332
333 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
334 int
335 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
336 aarch64_opnd_info *info, const aarch64_insn code,
337 const aarch64_inst *inst)
338 {
339 aarch64_insn value;
340 /* Number of elements in each structure to be loaded/stored. */
341 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
342
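  /* The table below is indexed by the value of the 4-bit "opcode" field; for
     example (illustrative), value 0 corresponds to LD4/ST4: four registers
     and four elements per structure.  */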
343 struct
344 {
345 unsigned is_reserved;
346 unsigned num_regs;
347 unsigned num_elements;
348 } data [] =
349 { {0, 4, 4},
350 {1, 4, 4},
351 {0, 4, 1},
352 {0, 4, 2},
353 {0, 3, 3},
354 {1, 3, 3},
355 {0, 3, 1},
356 {0, 1, 1},
357 {0, 2, 2},
358 {1, 2, 2},
359 {0, 2, 1},
360 };
361
362 /* Rt */
363 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
364 /* opcode */
365 value = extract_field (FLD_opcode, code, 0);
366 if (expected_num != data[value].num_elements || data[value].is_reserved)
367 return 0;
368 info->reglist.num_regs = data[value].num_regs;
369
370 return 1;
371 }
372
373 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
374 lanes instructions. */
375 int
376 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
377 aarch64_opnd_info *info, const aarch64_insn code,
378 const aarch64_inst *inst)
379 {
380 aarch64_insn value;
381
382 /* Rt */
383 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
384 /* S */
385 value = extract_field (FLD_S, code, 0);
386
387 /* Number of registers is equal to the number of elements in
388 each structure to be loaded/stored. */
389 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
390 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
391
392 /* Except when it is LD1R. */
393 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
394 info->reglist.num_regs = 2;
395
396 return 1;
397 }
398
399 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
400 load/store single element instructions. */
401 int
402 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
403 aarch64_opnd_info *info, const aarch64_insn code,
404 const aarch64_inst *inst ATTRIBUTE_UNUSED)
405 {
406 aarch64_field field = {0, 0};
407 aarch64_insn QSsize; /* fields Q:S:size. */
408 aarch64_insn opcodeh2; /* opcode<2:1> */
409
410 /* Rt */
411 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
412
413 /* Decode the index, opcode<2:1> and size. */
414 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
415 opcodeh2 = extract_field_2 (&field, code, 0);
416 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
417 switch (opcodeh2)
418 {
419 case 0x0:
420 info->qualifier = AARCH64_OPND_QLF_S_B;
421 /* Index encoded in "Q:S:size". */
422 info->reglist.index = QSsize;
423 break;
424 case 0x1:
425 if (QSsize & 0x1)
426 /* UND. */
427 return 0;
428 info->qualifier = AARCH64_OPND_QLF_S_H;
429 /* Index encoded in "Q:S:size<1>". */
430 info->reglist.index = QSsize >> 1;
431 break;
432 case 0x2:
433 if ((QSsize >> 1) & 0x1)
434 /* UND. */
435 return 0;
436 if ((QSsize & 0x1) == 0)
437 {
438 info->qualifier = AARCH64_OPND_QLF_S_S;
439 /* Index encoded in "Q:S". */
440 info->reglist.index = QSsize >> 2;
441 }
442 else
443 {
444 if (extract_field (FLD_S, code, 0))
445 /* UND */
446 return 0;
447 info->qualifier = AARCH64_OPND_QLF_S_D;
448 /* Index encoded in "Q". */
449 info->reglist.index = QSsize >> 3;
450 }
451 break;
452 default:
453 return 0;
454 }
455
456 info->reglist.has_index = 1;
457 info->reglist.num_regs = 0;
458 /* Number of registers is equal to the number of elements in
459 each structure to be loaded/stored. */
460 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
461 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
462
463 return 1;
464 }
465
466 /* Decode fields immh:immb and/or Q for e.g.
467 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
468 or SSHR <V><d>, <V><n>, #<shift>. */
469
470 int
471 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
472 aarch64_opnd_info *info, const aarch64_insn code,
473 const aarch64_inst *inst)
474 {
475 int pos;
476 aarch64_insn Q, imm, immh;
477 enum aarch64_insn_class iclass = inst->opcode->iclass;
478
479 immh = extract_field (FLD_immh, code, 0);
480 if (immh == 0)
481 return 0;
482 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
483 pos = 4;
484 /* Get highest set bit in immh. */
485 while (--pos >= 0 && (immh & 0x8) == 0)
486 immh <<= 1;
487
488 assert ((iclass == asimdshf || iclass == asisdshf)
489 && (info->type == AARCH64_OPND_IMM_VLSR
490 || info->type == AARCH64_OPND_IMM_VLSL));
491
492 if (iclass == asimdshf)
493 {
494 Q = extract_field (FLD_Q, code, 0);
495 /* immh Q <T>
496 0000 x SEE AdvSIMD modified immediate
497 0001 0 8B
498 0001 1 16B
499 001x 0 4H
500 001x 1 8H
501 01xx 0 2S
502 01xx 1 4S
503 1xxx 0 RESERVED
504 1xxx 1 2D */
505 info->qualifier =
506 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
507 }
508 else
509 info->qualifier = get_sreg_qualifier_from_value (pos);
510
511 if (info->type == AARCH64_OPND_IMM_VLSR)
512 /* immh <shift>
513 0000 SEE AdvSIMD modified immediate
514 0001 (16-UInt(immh:immb))
515 001x (32-UInt(immh:immb))
516 01xx (64-UInt(immh:immb))
517 1xxx (128-UInt(immh:immb)) */
518 info->imm.value = (16 << pos) - imm;
519 else
520 /* immh:immb
521 immh <shift>
522 0000 SEE AdvSIMD modified immediate
523 0001 (UInt(immh:immb)-8)
524 001x (UInt(immh:immb)-16)
525 01xx (UInt(immh:immb)-32)
526 1xxx (UInt(immh:immb)-64) */
527 info->imm.value = imm - (8 << pos);
528
529 return 1;
530 }
531
532 /* Decode the shift immediate (8, 16 or 32) for e.g.
        SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
533 int
534 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
535 aarch64_opnd_info *info, const aarch64_insn code,
536 const aarch64_inst *inst ATTRIBUTE_UNUSED)
537 {
538 int64_t imm;
539 aarch64_insn val;
540 val = extract_field (FLD_size, code, 0);
541 switch (val)
542 {
543 case 0: imm = 8; break;
544 case 1: imm = 16; break;
545 case 2: imm = 32; break;
546 default: return 0;
547 }
548 info->imm.value = imm;
549 return 1;
550 }
551
552 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
553    The value in the field(s) will be extracted as an unsigned immediate value. */
554 int
555 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
556 const aarch64_insn code,
557 const aarch64_inst *inst ATTRIBUTE_UNUSED)
558 {
559 int64_t imm;
560 /* Maximum of two fields to extract. */
561 assert (self->fields[2] == FLD_NIL);
562
563 if (self->fields[1] == FLD_NIL)
564 imm = extract_field (self->fields[0], code, 0);
565 else
566 /* e.g. TBZ b5:b40. */
567 imm = extract_fields (code, 0, 2, self->fields[0], self->fields[1]);
568
569 if (info->type == AARCH64_OPND_FPIMM)
570 info->imm.is_fp = 1;
571
572 if (operand_need_sign_extension (self))
573 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
574
575 if (operand_need_shift_by_two (self))
576 imm <<= 2;
577
578 if (info->type == AARCH64_OPND_ADDR_ADRP)
579 imm <<= 12;
580
581 info->imm.value = imm;
582 return 1;
583 }
584
585 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
586 int
587 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
588 const aarch64_insn code,
589 const aarch64_inst *inst ATTRIBUTE_UNUSED)
590 {
591 aarch64_ext_imm (self, info, code, inst);
592 info->shifter.kind = AARCH64_MOD_LSL;
593 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
594 return 1;
595 }
596
597 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
598 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
599 int
600 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
601 aarch64_opnd_info *info,
602 const aarch64_insn code,
603 const aarch64_inst *inst ATTRIBUTE_UNUSED)
604 {
605 uint64_t imm;
606 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
607 aarch64_field field = {0, 0};
608
609 assert (info->idx == 1);
610
611 if (info->type == AARCH64_OPND_SIMD_FPIMM)
612 info->imm.is_fp = 1;
613
614 /* a:b:c:d:e:f:g:h */
615 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
616 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
617 {
618 /* Either MOVI <Dd>, #<imm>
619 or MOVI <Vd>.2D, #<imm>.
620 <imm> is a 64-bit immediate
621 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
622 encoded in "a:b:c:d:e:f:g:h". */
623 int i;
624 unsigned abcdefgh = imm;
625 for (imm = 0ull, i = 0; i < 8; i++)
626 if (((abcdefgh >> i) & 0x1) != 0)
627 imm |= 0xffull << (8 * i);
628 }
629 info->imm.value = imm;
630
631 /* cmode */
632 info->qualifier = get_expected_qualifier (inst, info->idx);
633 switch (info->qualifier)
634 {
635 case AARCH64_OPND_QLF_NIL:
636 /* no shift */
637 info->shifter.kind = AARCH64_MOD_NONE;
638 return 1;
639 case AARCH64_OPND_QLF_LSL:
640 /* shift zeros */
641 info->shifter.kind = AARCH64_MOD_LSL;
642 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
643 {
644 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
645 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
646 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
647 default: assert (0); return 0;
648 }
649 /* 00: 0; 01: 8; 10:16; 11:24. */
650 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
651 break;
652 case AARCH64_OPND_QLF_MSL:
653 /* shift ones */
654 info->shifter.kind = AARCH64_MOD_MSL;
655 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
656 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
657 break;
658 default:
659 assert (0);
660 return 0;
661 }
662
663 return 1;
664 }
665
666 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
667 int
668 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
669 aarch64_opnd_info *info, const aarch64_insn code,
670 const aarch64_inst *inst ATTRIBUTE_UNUSED)
671 {
672   info->imm.value = 64 - extract_field (FLD_scale, code, 0);
673 return 1;
674 }
675
676 /* Decode arithmetic immediate for e.g.
677 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
678 int
679 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
680 aarch64_opnd_info *info, const aarch64_insn code,
681 const aarch64_inst *inst ATTRIBUTE_UNUSED)
682 {
683 aarch64_insn value;
684
685 info->shifter.kind = AARCH64_MOD_LSL;
686 /* shift */
687 value = extract_field (FLD_shift, code, 0);
688 if (value >= 2)
689 return 0;
690 info->shifter.amount = value ? 12 : 0;
691 /* imm12 (unsigned) */
692 info->imm.value = extract_field (FLD_imm12, code, 0);
693
694 return 1;
695 }
696
697 /* Decode logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
698
699 int
700 aarch64_ext_limm (const aarch64_operand *self ATTRIBUTE_UNUSED,
701 aarch64_opnd_info *info, const aarch64_insn code,
702 const aarch64_inst *inst ATTRIBUTE_UNUSED)
703 {
704 uint64_t imm, mask;
705 uint32_t sf;
706 uint32_t N, R, S;
707 unsigned simd_size;
708 aarch64_insn value;
709
710 value = extract_fields (code, 0, 3, FLD_N, FLD_immr, FLD_imms);
711 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_W
712 || inst->operands[0].qualifier == AARCH64_OPND_QLF_X);
713 sf = aarch64_get_qualifier_esize (inst->operands[0].qualifier) != 4;
714
715 /* value is N:immr:imms. */
716 S = value & 0x3f;
717 R = (value >> 6) & 0x3f;
718 N = (value >> 12) & 0x1;
719
720 if (sf == 0 && N == 1)
721 return 0;
722
723   /* The immediate value is a pattern of S+1 bits set to 1, left rotated by
724      SIMDsize - R (in other words, right rotated by R), then replicated. */
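  /* Worked example (illustrative): N=0, immr=1, imms=0b111100 selects a
     2-bit element containing a single 1 bit; rotating it right by R=1
     gives 0b10, which replicates to 0xaaaa...aa, i.e. #0xaaaaaaaa for a
     W register destination.  */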
725 if (N != 0)
726 {
727 simd_size = 64;
728 mask = 0xffffffffffffffffull;
729 }
730 else
731 {
732 switch (S)
733 {
734 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
735 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
736 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
737 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
738 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
739 default: return 0;
740 }
741 mask = (1ull << simd_size) - 1;
742 /* Top bits are IGNORED. */
743 R &= simd_size - 1;
744 }
745 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
746 if (S == simd_size - 1)
747 return 0;
748 /* S+1 consecutive bits to 1. */
749 /* NOTE: S can't be 63 due to detection above. */
750 imm = (1ull << (S + 1)) - 1;
751 /* Rotate to the left by simd_size - R. */
752 if (R != 0)
753 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
754 /* Replicate the value according to SIMD size. */
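  /* N.B. each case below deliberately falls through to the next one,
     doubling the width of the replicated pattern until it fills 64 bits.  */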
755 switch (simd_size)
756 {
757 case 2: imm = (imm << 2) | imm;
758 case 4: imm = (imm << 4) | imm;
759 case 8: imm = (imm << 8) | imm;
760 case 16: imm = (imm << 16) | imm;
761 case 32: imm = (imm << 32) | imm;
762 case 64: break;
763 default: assert (0); return 0;
764 }
765
766 info->imm.value = sf ? imm : imm & 0xffffffff;
767
768 return 1;
769 }
770
771 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
772 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
773 int
774 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
775 aarch64_opnd_info *info,
776 const aarch64_insn code, const aarch64_inst *inst)
777 {
778 aarch64_insn value;
779
780 /* Rt */
781 info->reg.regno = extract_field (FLD_Rt, code, 0);
782
783 /* size */
784 value = extract_field (FLD_ldst_size, code, 0);
785 if (inst->opcode->iclass == ldstpair_indexed
786 || inst->opcode->iclass == ldstnapair_offs
787 || inst->opcode->iclass == ldstpair_off
788 || inst->opcode->iclass == loadlit)
789 {
790 enum aarch64_opnd_qualifier qualifier;
791 switch (value)
792 {
793 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
794 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
795 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
796 default: return 0;
797 }
798 info->qualifier = qualifier;
799 }
800 else
801 {
802 /* opc1:size */
803 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
804 if (value > 0x4)
805 return 0;
806 info->qualifier = get_sreg_qualifier_from_value (value);
807 }
808
809 return 1;
810 }
811
812 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
813 int
814 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
815 aarch64_opnd_info *info,
816 aarch64_insn code,
817 const aarch64_inst *inst ATTRIBUTE_UNUSED)
818 {
819 /* Rn */
820 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
821 return 1;
822 }
823
824 /* Decode the address operand for e.g.
825 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
826 int
827 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
828 aarch64_opnd_info *info,
829 aarch64_insn code, const aarch64_inst *inst)
830 {
831 aarch64_insn S, value;
832
833 /* Rn */
834 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
835 /* Rm */
836 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
837 /* option */
838 value = extract_field (FLD_option, code, 0);
839 info->shifter.kind =
840 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
841 /* Fix-up the shifter kind; although the table-driven approach is
842 efficient, it is slightly inflexible, thus needing this fix-up. */
843 if (info->shifter.kind == AARCH64_MOD_UXTX)
844 info->shifter.kind = AARCH64_MOD_LSL;
845 /* S */
846 S = extract_field (FLD_S, code, 0);
847 if (S == 0)
848 {
849 info->shifter.amount = 0;
850 info->shifter.amount_present = 0;
851 }
852 else
853 {
854 int size;
855 /* Need information in other operand(s) to help achieve the decoding
856 from 'S' field. */
857 info->qualifier = get_expected_qualifier (inst, info->idx);
858 /* Get the size of the data element that is accessed, which may be
859 different from that of the source register size, e.g. in strb/ldrb. */
860 size = aarch64_get_qualifier_esize (info->qualifier);
861 info->shifter.amount = get_logsz (size);
862 info->shifter.amount_present = 1;
863 }
864
865 return 1;
866 }
867
868 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
869 int
870 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
871 aarch64_insn code, const aarch64_inst *inst)
872 {
873 aarch64_insn imm;
874 info->qualifier = get_expected_qualifier (inst, info->idx);
875
876 /* Rn */
877 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
878 /* simm (imm9 or imm7) */
879 imm = extract_field (self->fields[0], code, 0);
880 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
881 if (self->fields[0] == FLD_imm7)
882 /* scaled immediate in ld/st pair instructions. */
883 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
884 /* qualifier */
885 if (inst->opcode->iclass == ldst_unscaled
886 || inst->opcode->iclass == ldstnapair_offs
887 || inst->opcode->iclass == ldstpair_off
888 || inst->opcode->iclass == ldst_unpriv)
889 info->addr.writeback = 0;
890 else
891 {
892 /* pre/post- index */
893 info->addr.writeback = 1;
894 if (extract_field (self->fields[1], code, 0) == 1)
895 info->addr.preind = 1;
896 else
897 info->addr.postind = 1;
898 }
899
900 return 1;
901 }
902
903 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
904 int
905 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
906 aarch64_insn code,
907 const aarch64_inst *inst ATTRIBUTE_UNUSED)
908 {
909 int shift;
910 info->qualifier = get_expected_qualifier (inst, info->idx);
911 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
912 /* Rn */
913 info->addr.base_regno = extract_field (self->fields[0], code, 0);
914 /* uimm12 */
915 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
916 return 1;
917 }
918
919 /* Decode the address operand for e.g.
920 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
921 int
922 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
923 aarch64_opnd_info *info,
924 aarch64_insn code, const aarch64_inst *inst)
925 {
926 /* The opcode dependent area stores the number of elements in
927 each structure to be loaded/stored. */
928 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
929
930 /* Rn */
931 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
932 /* Rm | #<amount> */
933 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
934 if (info->addr.offset.regno == 31)
935 {
936 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
937     /* Special handling of loading a single structure to all lanes. */
938 info->addr.offset.imm = (is_ld1r ? 1
939 : inst->operands[0].reglist.num_regs)
940 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
941 else
942 info->addr.offset.imm = inst->operands[0].reglist.num_regs
943 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
944 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
945 }
946 else
947 info->addr.offset.is_reg = 1;
948 info->addr.writeback = 1;
949
950 return 1;
951 }
952
953 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
954 int
955 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
956 aarch64_opnd_info *info,
957 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
958 {
959 aarch64_insn value;
960 /* cond */
961 value = extract_field (FLD_cond, code, 0);
962 info->cond = get_cond_from_value (value);
963 return 1;
964 }
965
966 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
967 int
968 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
969 aarch64_opnd_info *info,
970 aarch64_insn code,
971 const aarch64_inst *inst ATTRIBUTE_UNUSED)
972 {
973 /* op0:op1:CRn:CRm:op2 */
974 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
975 FLD_CRm, FLD_op2);
976 return 1;
977 }
978
979 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
980 int
981 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
982 aarch64_opnd_info *info, aarch64_insn code,
983 const aarch64_inst *inst ATTRIBUTE_UNUSED)
984 {
985 int i;
986 /* op1:op2 */
987 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
988 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
989 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
990 return 1;
991 /* Reserved value in <pstatefield>. */
992 return 0;
993 }
994
995 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
996 int
997 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
998 aarch64_opnd_info *info,
999 aarch64_insn code,
1000 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1001 {
1002 int i;
1003 aarch64_insn value;
1004 const aarch64_sys_ins_reg *sysins_ops;
1005 /* op0:op1:CRn:CRm:op2 */
1006 value = extract_fields (code, 0, 5,
1007 FLD_op0, FLD_op1, FLD_CRn,
1008 FLD_CRm, FLD_op2);
1009
1010 switch (info->type)
1011 {
1012 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1013 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1014 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1015 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1016 default: assert (0); return 0;
1017 }
1018
1019 for (i = 0; sysins_ops[i].template != NULL; ++i)
1020 if (sysins_ops[i].value == value)
1021 {
1022 info->sysins_op = sysins_ops + i;
1023 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1024 info->sysins_op->template,
1025 (unsigned)info->sysins_op->value,
1026 info->sysins_op->has_xt, i);
1027 return 1;
1028 }
1029
1030 return 0;
1031 }
1032
1033 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1034
1035 int
1036 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1037 aarch64_opnd_info *info,
1038 aarch64_insn code,
1039 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1040 {
1041 /* CRm */
1042 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1043 return 1;
1044 }
1045
1046 /* Decode the prefetch operation option operand for e.g.
1047 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1048
1049 int
1050 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1051 aarch64_opnd_info *info,
1052 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1053 {
1054 /* prfop in Rt */
1055 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1056 return 1;
1057 }
1058
1059 /* Decode the extended register operand for e.g.
1060 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1061 int
1062 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1063 aarch64_opnd_info *info,
1064 aarch64_insn code,
1065 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1066 {
1067 aarch64_insn value;
1068
1069 /* Rm */
1070 info->reg.regno = extract_field (FLD_Rm, code, 0);
1071 /* option */
1072 value = extract_field (FLD_option, code, 0);
1073 info->shifter.kind =
1074 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1075 /* imm3 */
1076 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1077
1078 /* This makes the constraint checking happy. */
1079 info->shifter.operator_present = 1;
1080
1081 /* Assume inst->operands[0].qualifier has been resolved. */
1082 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1083 info->qualifier = AARCH64_OPND_QLF_W;
1084 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1085 && (info->shifter.kind == AARCH64_MOD_UXTX
1086 || info->shifter.kind == AARCH64_MOD_SXTX))
1087 info->qualifier = AARCH64_OPND_QLF_X;
1088
1089 return 1;
1090 }
1091
1092 /* Decode the shifted register operand for e.g.
1093 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1094 int
1095 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1096 aarch64_opnd_info *info,
1097 aarch64_insn code,
1098 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1099 {
1100 aarch64_insn value;
1101
1102 /* Rm */
1103 info->reg.regno = extract_field (FLD_Rm, code, 0);
1104 /* shift */
1105 value = extract_field (FLD_shift, code, 0);
1106 info->shifter.kind =
1107 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1108 if (info->shifter.kind == AARCH64_MOD_ROR
1109 && inst->opcode->iclass != log_shift)
1110 /* ROR is not available for the shifted register operand in arithmetic
1111 instructions. */
1112 return 0;
1113 /* imm6 */
1114 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1115
1116 /* This makes the constraint checking happy. */
1117 info->shifter.operator_present = 1;
1118
1119 return 1;
1120 }
1121 \f
1122 /* Bitfields that are commonly used to encode certain operands' information
1123 may be partially used as part of the base opcode in some instructions.
1124 For example, the bit 1 of the field 'size' in
1125 FCVTXN <Vb><d>, <Va><n>
1126 is actually part of the base opcode, while only size<0> is available
1127 for encoding the register type. Another example is the AdvSIMD
1128 instruction ORR (register), in which the field 'size' is also used for
1129 the base opcode, leaving only the field 'Q' available to encode the
1130 vector register arrangement specifier '8B' or '16B'.
1131
1132 This function tries to deduce the qualifier from the value of partially
1133 constrained field(s). Given the VALUE of such a field or fields, the
1134 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1135 operand encoding), the function returns the matching qualifier or
1136 AARCH64_OPND_QLF_NIL if nothing matches.
1137
1138 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1139 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1140 may end with AARCH64_OPND_QLF_NIL. */
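/* For example (illustrative): for AdvSIMD ORR (register), mentioned above,
   the size bits belong to the base opcode and only Q is free, so MASK covers
   just the Q bit; with CANDIDATES { 8B, 16B } the function returns 8B when
   Q is 0 and 16B when Q is 1.  */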
1141
1142 static enum aarch64_opnd_qualifier
1143 get_qualifier_from_partial_encoding (aarch64_insn value,
1144 const enum aarch64_opnd_qualifier* \
1145 candidates,
1146 aarch64_insn mask)
1147 {
1148 int i;
1149 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1150 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1151 {
1152 aarch64_insn standard_value;
1153 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1154 break;
1155 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1156 if ((standard_value & mask) == (value & mask))
1157 return candidates[i];
1158 }
1159 return AARCH64_OPND_QLF_NIL;
1160 }
1161
1162 /* Given a list of qualifier sequences, return all possible valid qualifiers
1163 for operand IDX in QUALIFIERS.
1164 Assume QUALIFIERS is an array whose length is large enough. */
1165
1166 static void
1167 get_operand_possible_qualifiers (int idx,
1168 const aarch64_opnd_qualifier_seq_t *list,
1169 enum aarch64_opnd_qualifier *qualifiers)
1170 {
1171 int i;
1172 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1173 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1174 break;
1175 }
1176
1177 /* Decode the size Q field for e.g. SHADD.
1178    We tag one operand with the qualifier according to the code;
1179    whether the qualifier is valid for this opcode is the duty of
1180    the semantic checking. */
1181
1182 static int
1183 decode_sizeq (aarch64_inst *inst)
1184 {
1185 int idx;
1186 enum aarch64_opnd_qualifier qualifier;
1187 aarch64_insn code;
1188 aarch64_insn value, mask;
1189 enum aarch64_field_kind fld_sz;
1190 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1191
1192 if (inst->opcode->iclass == asisdlse
1193 || inst->opcode->iclass == asisdlsep
1194 || inst->opcode->iclass == asisdlso
1195 || inst->opcode->iclass == asisdlsop)
1196 fld_sz = FLD_vldst_size;
1197 else
1198 fld_sz = FLD_size;
1199
1200 code = inst->value;
1201 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1202   /* Obtain the info about which bits of the fields Q and size are actually
1203 available for operand encoding. Opcodes like FMAXNM and FMLA have
1204 size[1] unavailable. */
1205 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1206
1207   /* The index of the operand that we are going to tag with a qualifier, and
1208      the qualifier itself, are deduced from the value of the size and Q fields
1209      and the possible valid qualifier lists. */
1210 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1211 DEBUG_TRACE ("key idx: %d", idx);
1212
1213   /* For most related instructions, size:Q are fully available for operand
1214 encoding. */
1215 if (mask == 0x7)
1216 {
1217 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1218 return 1;
1219 }
1220
1221 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1222 candidates);
1223 #ifdef DEBUG_AARCH64
1224 if (debug_dump)
1225 {
1226 int i;
1227 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1228 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1229 DEBUG_TRACE ("qualifier %d: %s", i,
1230 aarch64_get_qualifier_name(candidates[i]));
1231 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1232 }
1233 #endif /* DEBUG_AARCH64 */
1234
1235 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1236
1237 if (qualifier == AARCH64_OPND_QLF_NIL)
1238 return 0;
1239
1240 inst->operands[idx].qualifier = qualifier;
1241 return 1;
1242 }
1243
1244 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1245 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1246
1247 static int
1248 decode_asimd_fcvt (aarch64_inst *inst)
1249 {
1250 aarch64_field field = {0, 0};
1251 aarch64_insn value;
1252 enum aarch64_opnd_qualifier qualifier;
1253
1254 gen_sub_field (FLD_size, 0, 1, &field);
1255 value = extract_field_2 (&field, inst->value, 0);
1256 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1257 : AARCH64_OPND_QLF_V_2D;
1258 switch (inst->opcode->op)
1259 {
1260 case OP_FCVTN:
1261 case OP_FCVTN2:
1262 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1263 inst->operands[1].qualifier = qualifier;
1264 break;
1265 case OP_FCVTL:
1266 case OP_FCVTL2:
1267 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1268 inst->operands[0].qualifier = qualifier;
1269 break;
1270 default:
1271 assert (0);
1272 return 0;
1273 }
1274
1275 return 1;
1276 }
1277
1278 /* Decode size[0], i.e. bit 22, for
1279 e.g. FCVTXN <Vb><d>, <Va><n>. */
1280
1281 static int
1282 decode_asisd_fcvtxn (aarch64_inst *inst)
1283 {
1284 aarch64_field field = {0, 0};
1285 gen_sub_field (FLD_size, 0, 1, &field);
1286 if (!extract_field_2 (&field, inst->value, 0))
1287 return 0;
1288 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1289 return 1;
1290 }
1291
1292 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1293 static int
1294 decode_fcvt (aarch64_inst *inst)
1295 {
1296 enum aarch64_opnd_qualifier qualifier;
1297 aarch64_insn value;
1298 const aarch64_field field = {15, 2};
1299
1300 /* opc dstsize */
1301 value = extract_field_2 (&field, inst->value, 0);
1302 switch (value)
1303 {
1304 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1305 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1306 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1307 default: return 0;
1308 }
1309 inst->operands[0].qualifier = qualifier;
1310
1311 return 1;
1312 }
1313
1314 /* Do miscellaneous decodings that are not common enough to be driven by
1315 flags. */
1316
1317 static int
1318 do_misc_decoding (aarch64_inst *inst)
1319 {
1320 switch (inst->opcode->op)
1321 {
1322 case OP_FCVT:
1323 return decode_fcvt (inst);
1324 case OP_FCVTN:
1325 case OP_FCVTN2:
1326 case OP_FCVTL:
1327 case OP_FCVTL2:
1328 return decode_asimd_fcvt (inst);
1329 case OP_FCVTXN_S:
1330 return decode_asisd_fcvtxn (inst);
1331 default:
1332 return 0;
1333 }
1334 }
1335
1336 /* Opcodes that have fields shared by multiple operands are usually flagged
1337    with special flags. In this function, we detect such flags, decode the
1338    related field(s) and store the information in one of the related operands.
1339    The 'one' operand is not just any operand, but one of the operands that can
1340    accommodate all the information that has been decoded. */
1341
1342 static int
1343 do_special_decoding (aarch64_inst *inst)
1344 {
1345 int idx;
1346 aarch64_insn value;
1347   /* Condition for truly conditionally-executed instructions, e.g. b.cond. */
1348 if (inst->opcode->flags & F_COND)
1349 {
1350 value = extract_field (FLD_cond2, inst->value, 0);
1351 inst->cond = get_cond_from_value (value);
1352 }
1353 /* 'sf' field. */
1354 if (inst->opcode->flags & F_SF)
1355 {
1356 idx = select_operand_for_sf_field_coding (inst->opcode);
1357 value = extract_field (FLD_sf, inst->value, 0);
1358 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1359 if ((inst->opcode->flags & F_N)
1360 && extract_field (FLD_N, inst->value, 0) != value)
1361 return 0;
1362 }
1363 /* size:Q fields. */
1364 if (inst->opcode->flags & F_SIZEQ)
1365 return decode_sizeq (inst);
1366
1367 if (inst->opcode->flags & F_FPTYPE)
1368 {
1369 idx = select_operand_for_fptype_field_coding (inst->opcode);
1370 value = extract_field (FLD_type, inst->value, 0);
1371 switch (value)
1372 {
1373 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
1374 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
1375 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
1376 default: return 0;
1377 }
1378 }
1379
1380 if (inst->opcode->flags & F_SSIZE)
1381 {
1382 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
1383 of the base opcode. */
1384 aarch64_insn mask;
1385 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1386 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1387 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
1388 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
1389       /* For most related instructions, the 'size' field is fully available for
1390 operand encoding. */
1391 if (mask == 0x3)
1392 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
1393 else
1394 {
1395 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1396 candidates);
1397 inst->operands[idx].qualifier
1398 = get_qualifier_from_partial_encoding (value, candidates, mask);
1399 }
1400 }
1401
1402 if (inst->opcode->flags & F_T)
1403 {
1404 /* Num of consecutive '0's on the right side of imm5<3:0>. */
1405 int num = 0;
1406 unsigned val, Q;
1407 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1408 == AARCH64_OPND_CLASS_SIMD_REG);
1409 /* imm5<3:0> q <t>
1410 0000 x reserved
1411 xxx1 0 8b
1412 xxx1 1 16b
1413 xx10 0 4h
1414 xx10 1 8h
1415 x100 0 2s
1416 x100 1 4s
1417 1000 0 reserved
1418 1000 1 2d */
1419 val = extract_field (FLD_imm5, inst->value, 0);
1420 while ((val & 0x1) == 0 && ++num <= 3)
1421 val >>= 1;
1422 if (num > 3)
1423 return 0;
1424 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
1425 inst->operands[0].qualifier =
1426 get_vreg_qualifier_from_value ((num << 1) | Q);
1427 }
1428
1429 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1430 {
1431 /* Use Rt to encode in the case of e.g.
1432 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1433 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1434 if (idx == -1)
1435 {
1436           /* Otherwise use the result operand, which has to be an integer
1437 register. */
1438 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1439 == AARCH64_OPND_CLASS_INT_REG);
1440 idx = 0;
1441 }
1442 assert (idx == 0 || idx == 1);
1443 value = extract_field (FLD_Q, inst->value, 0);
1444 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1445 }
1446
1447 if (inst->opcode->flags & F_LDS_SIZE)
1448 {
1449 aarch64_field field = {0, 0};
1450 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1451 == AARCH64_OPND_CLASS_INT_REG);
1452 gen_sub_field (FLD_opc, 0, 1, &field);
1453 value = extract_field_2 (&field, inst->value, 0);
1454 inst->operands[0].qualifier
1455 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
1456 }
1457
1458 /* Miscellaneous decoding; done as the last step. */
1459 if (inst->opcode->flags & F_MISC)
1460 return do_misc_decoding (inst);
1461
1462 return 1;
1463 }
1464
1465 /* Converters converting a real opcode instruction to its alias form. */
1466
1467 /* ROR <Wd>, <Ws>, #<shift>
1468 is equivalent to:
1469 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1470 static int
1471 convert_extr_to_ror (aarch64_inst *inst)
1472 {
1473 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1474 {
1475 copy_operand_info (inst, 2, 3);
1476 inst->operands[3].type = AARCH64_OPND_NIL;
1477 return 1;
1478 }
1479 return 0;
1480 }
1481
1482 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1483 is equivalent to:
1484 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1485 static int
1486 convert_shll_to_xtl (aarch64_inst *inst)
1487 {
1488 if (inst->operands[2].imm.value == 0)
1489 {
1490 inst->operands[2].type = AARCH64_OPND_NIL;
1491 return 1;
1492 }
1493 return 0;
1494 }
1495
1496 /* Convert
1497 UBFM <Xd>, <Xn>, #<shift>, #63.
1498 to
1499 LSR <Xd>, <Xn>, #<shift>. */
1500 static int
1501 convert_bfm_to_sr (aarch64_inst *inst)
1502 {
1503 int64_t imms, val;
1504
1505 imms = inst->operands[3].imm.value;
1506 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1507 if (imms == val)
1508 {
1509 inst->operands[3].type = AARCH64_OPND_NIL;
1510 return 1;
1511 }
1512
1513 return 0;
1514 }
1515
1516 /* Convert ORR to MOV. */
1517 static int
1518 convert_orr_to_mov (aarch64_inst *inst)
1519 {
1520 /* MOV <Vd>.<T>, <Vn>.<T>
1521 is equivalent to:
1522 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1523 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1524 {
1525 inst->operands[2].type = AARCH64_OPND_NIL;
1526 return 1;
1527 }
1528 return 0;
1529 }
1530
1531 /* When <imms> >= <immr>, the instruction written:
1532 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1533 is equivalent to:
1534 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
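/* For instance (illustrative): SBFM X0, X1, #4, #11 is disassembled as
   SBFX X0, X1, #4, #8, since <width> = imms + 1 - <lsb> = 8.  */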
1535
1536 static int
1537 convert_bfm_to_bfx (aarch64_inst *inst)
1538 {
1539 int64_t immr, imms;
1540
1541 immr = inst->operands[2].imm.value;
1542 imms = inst->operands[3].imm.value;
1543 if (imms >= immr)
1544 {
1545 int64_t lsb = immr;
1546 inst->operands[2].imm.value = lsb;
1547 inst->operands[3].imm.value = imms + 1 - lsb;
1548 /* The two opcodes have different qualifiers for
1549 the immediate operands; reset to help the checking. */
1550 reset_operand_qualifier (inst, 2);
1551 reset_operand_qualifier (inst, 3);
1552 return 1;
1553 }
1554
1555 return 0;
1556 }
1557
1558 /* When <imms> < <immr>, the instruction written:
1559 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1560 is equivalent to:
1561 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1562
1563 static int
1564 convert_bfm_to_bfi (aarch64_inst *inst)
1565 {
1566 int64_t immr, imms, val;
1567
1568 immr = inst->operands[2].imm.value;
1569 imms = inst->operands[3].imm.value;
1570 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
1571 if (imms < immr)
1572 {
1573 inst->operands[2].imm.value = (val - immr) & (val - 1);
1574 inst->operands[3].imm.value = imms + 1;
1575 /* The two opcodes have different qualifiers for
1576 the immediate operands; reset to help the checking. */
1577 reset_operand_qualifier (inst, 2);
1578 reset_operand_qualifier (inst, 3);
1579 return 1;
1580 }
1581
1582 return 0;
1583 }
1584
1585 /* The instruction written:
1586 LSL <Xd>, <Xn>, #<shift>
1587 is equivalent to:
1588 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
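/* For instance (illustrative): UBFM X0, X1, #60, #59 is disassembled as
   LSL X0, X1, #4, since immr == imms + 1 and the shift is 63 - imms = 4.  */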
1589
1590 static int
1591 convert_ubfm_to_lsl (aarch64_inst *inst)
1592 {
1593 int64_t immr = inst->operands[2].imm.value;
1594 int64_t imms = inst->operands[3].imm.value;
1595 int64_t val
1596 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1597
1598 if ((immr == 0 && imms == val) || immr == imms + 1)
1599 {
1600 inst->operands[3].type = AARCH64_OPND_NIL;
1601 inst->operands[2].imm.value = val - imms;
1602 return 1;
1603 }
1604
1605 return 0;
1606 }
1607
1608 /* CINC <Wd>, <Wn>, <cond>
1609 is equivalent to:
1610 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
1611 where <cond> is not AL or NV. */
1612
1613 static int
1614 convert_from_csel (aarch64_inst *inst)
1615 {
1616 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
1617 && (inst->operands[3].cond->value & 0xe) != 0xe)
1618 {
1619 copy_operand_info (inst, 2, 3);
1620 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
1621 inst->operands[3].type = AARCH64_OPND_NIL;
1622 return 1;
1623 }
1624 return 0;
1625 }
1626
1627 /* CSET <Wd>, <cond>
1628 is equivalent to:
1629 CSINC <Wd>, WZR, WZR, invert(<cond>)
1630 where <cond> is not AL or NV. */
1631
1632 static int
1633 convert_csinc_to_cset (aarch64_inst *inst)
1634 {
1635 if (inst->operands[1].reg.regno == 0x1f
1636 && inst->operands[2].reg.regno == 0x1f
1637 && (inst->operands[3].cond->value & 0xe) != 0xe)
1638 {
1639 copy_operand_info (inst, 1, 3);
1640 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
1641 inst->operands[3].type = AARCH64_OPND_NIL;
1642 inst->operands[2].type = AARCH64_OPND_NIL;
1643 return 1;
1644 }
1645 return 0;
1646 }
1647
1648 /* MOV <Wd>, #<imm>
1649 is equivalent to:
1650 MOVZ <Wd>, #<imm16>, LSL #<shift>.
1651
1652 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1653 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1654 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1655 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1656 machine-instruction mnemonic must be used. */
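/* For instance (illustrative): MOVZ X0, #0x1234, LSL #32 is preferred as
   MOV X0, #0x123400000000, whereas MOVZ X0, #0x0, LSL #16 keeps the MOVZ
   mnemonic because the shift amount is not LSL #0.  */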
1657
1658 static int
1659 convert_movewide_to_mov (aarch64_inst *inst)
1660 {
1661 uint64_t value = inst->operands[1].imm.value;
1662 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
1663 if (value == 0 && inst->operands[1].shifter.amount != 0)
1664 return 0;
1665 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1666 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
1667 value <<= inst->operands[1].shifter.amount;
1668   /* As an alias converter, bear in mind that INST->OPCODE
1669      is the opcode of the real instruction. */
1670 if (inst->opcode->op == OP_MOVN)
1671 {
1672 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1673 value = ~value;
1674 /* A MOVN has an immediate that could be encoded by MOVZ. */
1675 if (aarch64_wide_constant_p (value, is32, NULL) == TRUE)
1676 return 0;
1677 }
1678 inst->operands[1].imm.value = value;
1679 inst->operands[1].shifter.amount = 0;
1680 return 1;
1681 }
1682
1683 /* MOV <Wd>, #<imm>
1684 is equivalent to:
1685 ORR <Wd>, WZR, #<imm>.
1686
1687 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1688 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1689 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1690 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1691 machine-instruction mnemonic must be used. */
1692
1693 static int
1694 convert_movebitmask_to_mov (aarch64_inst *inst)
1695 {
1696 int is32;
1697 uint64_t value;
1698
1699 /* Should have been assured by the base opcode value. */
1700 assert (inst->operands[1].reg.regno == 0x1f);
1701 copy_operand_info (inst, 1, 2);
1702 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1703 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1704 value = inst->operands[1].imm.value;
1705 /* ORR has an immediate that could be generated by a MOVZ or MOVN
1706 instruction. */
1707 if (inst->operands[0].reg.regno != 0x1f
1708 && (aarch64_wide_constant_p (value, is32, NULL) == TRUE
1709 || aarch64_wide_constant_p (~value, is32, NULL) == TRUE))
1710 return 0;
1711
1712 inst->operands[2].type = AARCH64_OPND_NIL;
1713 return 1;
1714 }
1715
1716 /* Some alias opcodes are disassembled by being converted from their real-form.
1717 N.B. INST->OPCODE is the real opcode rather than the alias. */
1718
1719 static int
1720 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
1721 {
1722 switch (alias->op)
1723 {
1724 case OP_ASR_IMM:
1725 case OP_LSR_IMM:
1726 return convert_bfm_to_sr (inst);
1727 case OP_LSL_IMM:
1728 return convert_ubfm_to_lsl (inst);
1729 case OP_CINC:
1730 case OP_CINV:
1731 case OP_CNEG:
1732 return convert_from_csel (inst);
1733 case OP_CSET:
1734 case OP_CSETM:
1735 return convert_csinc_to_cset (inst);
1736 case OP_UBFX:
1737 case OP_BFXIL:
1738 case OP_SBFX:
1739 return convert_bfm_to_bfx (inst);
1740 case OP_SBFIZ:
1741 case OP_BFI:
1742 case OP_UBFIZ:
1743 return convert_bfm_to_bfi (inst);
1744 case OP_MOV_V:
1745 return convert_orr_to_mov (inst);
1746 case OP_MOV_IMM_WIDE:
1747 case OP_MOV_IMM_WIDEN:
1748 return convert_movewide_to_mov (inst);
1749 case OP_MOV_IMM_LOG:
1750 return convert_movebitmask_to_mov (inst);
1751 case OP_ROR_IMM:
1752 return convert_extr_to_ror (inst);
1753 case OP_SXTL:
1754 case OP_SXTL2:
1755 case OP_UXTL:
1756 case OP_UXTL2:
1757 return convert_shll_to_xtl (inst);
1758 default:
1759 return 0;
1760 }
1761 }
1762
1763 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
1764 aarch64_inst *, int);
1765
1766 /* Given the instruction information in *INST, check if the instruction has
1767 any alias form that can be used to represent *INST. If the answer is yes,
1768 update *INST to be in the form of the determined alias. */
1769
1770 /* In the opcode description table, the following flags are used in opcode
1771 entries to help establish the relations between the real and alias opcodes:
1772
1773 F_ALIAS: opcode is an alias
1774 F_HAS_ALIAS: opcode has alias(es)
1775 F_P1
1776 F_P2
1777      F_P3:	Disassembly preference priority 1-3 (the larger, the
1778		higher). If nothing is specified, the priority defaults
1779		to 0, i.e. the lowest priority.
1780
1781    Although the relation between the machine and the alias instructions is not
1782 explicitly described, it can be easily determined from the base opcode
1783 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
1784 description entries:
1785
1786 The mask of an alias opcode must be equal to or a super-set (i.e. more
1787 constrained) of that of the aliased opcode; so is the base opcode value.
1788
1789 if (opcode_has_alias (real) && alias_opcode_p (opcode)
1790 && (opcode->mask & real->mask) == real->mask
1791 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
1792 then OPCODE is an alias of, and only of, the REAL instruction
1793
1794    The alias relationship is forced to be flat-structured to keep the related
1795    algorithm simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
1796
1797 During the disassembling, the decoding decision tree (in
1798    opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
1799    if the decoding of such a machine instruction succeeds (and -Mno-aliases is
1800    not specified), the disassembler will check whether any alias instruction
1801    exists for this real instruction. If there is, the disassembler will try to
1802    disassemble the 32-bit binary again using the alias's rule, or try to
1803    convert the IR to the form of the alias. In the case of multiple aliases,
1804    the aliases are tried one by one from the highest priority (currently the
1805    flag F_P3) to the lowest priority (no priority flag), and the first one
1806    that succeeds is adopted.
1807
1808 You may ask why there is a need for the conversion of IR from one form to
1809 another in handling certain aliases. This is because, on one hand, it avoids
1810 adding more operand code to handle unusual encoding/decoding; on the other
1811 hand, during disassembly, the conversion is an effective way to
1812 check the conditions of an alias (as an alias may be adopted only if certain
1813 conditions are met).
1814
1815 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
1816 aarch64_opcode_table and generated aarch64_find_alias_opcode and
1817 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
1818
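/* As an illustration (the operand values here are indicative rather than
   quoted from the opcode table): UBFM <Wd>, <Wn>, #<shift>, #31 is
   preferably printed as its alias LSR <Wd>, <Wn>, #<shift>.  The LSR
   immediate entry is expected to carry F_ALIAS and F_CONV, so once the UBFM
   encoding has been decoded, convert_to_alias (via convert_bfm_to_sr above)
   verifies the alias conditions (e.g. imms being 31/63) and, on success,
   aarch64_replace_opcode installs the LSR entry before printing.  */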
1819 static void
1820 determine_disassembling_preference (struct aarch64_inst *inst)
1821 {
1822 const aarch64_opcode *opcode;
1823 const aarch64_opcode *alias;
1824
1825 opcode = inst->opcode;
1826
1827 /* This opcode does not have an alias, so use itself. */
1828 if (opcode_has_alias (opcode) == FALSE)
1829 return;
1830
1831 alias = aarch64_find_alias_opcode (opcode);
1832 assert (alias);
1833
1834 #ifdef DEBUG_AARCH64
1835 if (debug_dump)
1836 {
1837 const aarch64_opcode *tmp = alias;
1838 printf ("#### LIST ordered: ");
1839 while (tmp)
1840 {
1841 printf ("%s, ", tmp->name);
1842 tmp = aarch64_find_next_alias_opcode (tmp);
1843 }
1844 printf ("\n");
1845 }
1846 #endif /* DEBUG_AARCH64 */
1847
1848 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
1849 {
1850 DEBUG_TRACE ("try %s", alias->name);
1851 assert (alias_opcode_p (alias));
1852
1853 /* An alias can be a pseudo opcode which will never be used in the
1854 disassembly, e.g. BIC logical immediate is such a pseudo opcode
1855 aliasing AND. */
1856 if (pseudo_opcode_p (alias))
1857 {
1858 DEBUG_TRACE ("skip pseudo %s", alias->name);
1859 continue;
1860 }
1861
1862 if ((inst->value & alias->mask) != alias->opcode)
1863 {
1864 DEBUG_TRACE ("skip %s as base opcode does not match", alias->name);
1865 continue;
1866 }
1867 /* No need to do any complicated transformation on operands if the alias
1868 opcode does not have any operands. */
1869 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
1870 {
1871 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
1872 aarch64_replace_opcode (inst, alias);
1873 return;
1874 }
1875 if (alias->flags & F_CONV)
1876 {
1877 aarch64_inst copy;
1878 memcpy (&copy, inst, sizeof (aarch64_inst));
1879 /* ALIAS is the preference as long as the instruction can be
1880 successfully converted to the form of ALIAS. */
1881 if (convert_to_alias (&copy, alias) == 1)
1882 {
1883 aarch64_replace_opcode (&copy, alias);
1884 assert (aarch64_match_operands_constraint (&copy, NULL));
1885 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
1886 memcpy (inst, &copy, sizeof (aarch64_inst));
1887 return;
1888 }
1889 }
1890 else
1891 {
1892 /* Directly decode the alias opcode. */
1893 aarch64_inst temp;
1894 memset (&temp, '\0', sizeof (aarch64_inst));
1895 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
1896 {
1897 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
1898 memcpy (inst, &temp, sizeof (aarch64_inst));
1899 return;
1900 }
1901 }
1902 }
1903 }
1904
1905 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
1906 fails, which means that CODE is not an instruction of OPCODE; otherwise
1907 return 1.
1908
1909 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
1910 determined and used to disassemble CODE; this is done just before the
1911 return. */
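/* A typical call, as made from disas_aarch64_insn below, is
   aarch64_opcode_decode (opcode, insn, inst, no_aliases), trying each
   candidate opcode returned by aarch64_opcode_lookup until one decodes.  */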
1912
1913 static int
1914 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
1915 aarch64_inst *inst, int noaliases_p)
1916 {
1917 int i;
1918
1919 DEBUG_TRACE ("enter with %s", opcode->name);
1920
1921 assert (opcode && inst);
1922
1923 /* Check the base opcode. */
1924 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
1925 {
1926 DEBUG_TRACE ("base opcode match FAIL");
1927 goto decode_fail;
1928 }
1929
1930 /* Clear inst. */
1931 memset (inst, '\0', sizeof (aarch64_inst));
1932
1933 inst->opcode = opcode;
1934 inst->value = code;
1935
1936 /* Assign operand codes and indexes. */
1937 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1938 {
1939 if (opcode->operands[i] == AARCH64_OPND_NIL)
1940 break;
1941 inst->operands[i].type = opcode->operands[i];
1942 inst->operands[i].idx = i;
1943 }
1944
1945 /* Call the opcode decoder indicated by flags. */
1946 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
1947 {
1948 DEBUG_TRACE ("opcode flag-based decoder FAIL");
1949 goto decode_fail;
1950 }
1951
1952 /* Call operand decoders. */
1953 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1954 {
1955 const aarch64_operand *opnd;
1956 enum aarch64_opnd type;
1957 type = opcode->operands[i];
1958 if (type == AARCH64_OPND_NIL)
1959 break;
1960 opnd = &aarch64_operands[type];
1961 if (operand_has_extractor (opnd)
1962 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
1963 {
1964 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
1965 goto decode_fail;
1966 }
1967 }
1968
1969 /* Match the qualifiers. */
1970 if (aarch64_match_operands_constraint (inst, NULL) == 1)
1971 {
1972 /* Arriving here, the CODE has been determined as a valid instruction
1973 of OPCODE and *INST has been filled with information of this OPCODE
1974 instruction. Before the return, check if the instruction has any
1975 alias and should be disassembled in the form of its alias instead.
1976 If the answer is yes, *INST will be updated. */
1977 if (!noaliases_p)
1978 determine_disassembling_preference (inst);
1979 DEBUG_TRACE ("SUCCESS");
1980 return 1;
1981 }
1982 else
1983 {
1984 DEBUG_TRACE ("constraint matching FAIL");
1985 }
1986
1987 decode_fail:
1988 return 0;
1989 }
1990 \f
1991 /* This does some user-friendly fix-up to *INST. It currently focuses on
1992 adjusting qualifiers to help the printed instruction be
1993 recognized/understood more easily. */
1994
1995 static void
1996 user_friendly_fixup (aarch64_inst *inst)
1997 {
1998 switch (inst->opcode->iclass)
1999 {
2000 case testbranch:
2001 /* TBNZ Xn|Wn, #uimm6, label
2002 Test and Branch Not Zero: conditionally jumps to label if bit number
2003 uimm6 in register Xn is not zero. The bit number implies the width of
2004 the register, which may be written and should be disassembled as Wn if
2005 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2006 */
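/* For example (an illustrative encoding, not taken from a testcase), a TBNZ
   whose bit number field decodes to 3 is printed as "tbnz w0, #3, <label>",
   whereas a bit number of 35 keeps the X form, e.g. "tbnz x0, #35, <label>". */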
2007 if (inst->operands[1].imm.value < 32)
2008 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2009 break;
2010 default: break;
2011 }
2012 }
2013
2014 /* Decode INSN and fill in *INST the instruction information. */
2015
2016 static int
2017 disas_aarch64_insn (uint64_t pc ATTRIBUTE_UNUSED, uint32_t insn,
2018 aarch64_inst *inst)
2019 {
2020 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2021
2022 #ifdef DEBUG_AARCH64
2023 if (debug_dump)
2024 {
2025 const aarch64_opcode *tmp = opcode;
2026 printf ("\n");
2027 DEBUG_TRACE ("opcode lookup:");
2028 while (tmp != NULL)
2029 {
2030 aarch64_verbose (" %s", tmp->name);
2031 tmp = aarch64_find_next_opcode (tmp);
2032 }
2033 }
2034 #endif /* DEBUG_AARCH64 */
2035
2036 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2037 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2038 opcode field and value; the difference is that one of them has an
2039 extra field as part of the opcode, while that same field is used for
2040 operand encoding in the other opcode(s) ('immh' in the case of the example). */
2041 while (opcode != NULL)
2042 {
2043 /* But only one opcode can be successfully decoded, as the
2044 decoding routine checks the constraints carefully. */
2045 if (aarch64_opcode_decode (opcode, insn, inst, no_aliases) == 1)
2046 return ERR_OK;
2047 opcode = aarch64_find_next_opcode (opcode);
2048 }
2049
2050 return ERR_UND;
2051 }
2052
2053 /* Print operands. */
2054
2055 static void
2056 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2057 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2058 {
2059 int i, pcrel_p, num_printed;
2060 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2061 {
2062 const size_t size = 128;
2063 char str[size];
2064 /* We rely primarily on the opcode's operand info; however, we also look
2065 into inst->operands to support the disassembling of an optional
2066 operand.
2067 The two operand codes should be the same in all cases, apart from
2068 when an operand can be optional. */
2069 if (opcode->operands[i] == AARCH64_OPND_NIL
2070 || opnds[i].type == AARCH64_OPND_NIL)
2071 break;
2072
2073 /* Generate the operand string in STR. */
2074 aarch64_print_operand (str, size, pc, opcode, opnds, i, &pcrel_p,
2075 &info->target);
2076
2077 /* Print the delimiter (taking account of omitted operand(s)). */
2078 if (str[0] != '\0')
2079 (*info->fprintf_func) (info->stream, "%s",
2080 num_printed++ == 0 ? "\t" : ", ");
2081
2082 /* Print the operand. */
2083 if (pcrel_p)
2084 (*info->print_address_func) (info->target, info);
2085 else
2086 (*info->fprintf_func) (info->stream, "%s", str);
2087 }
2088 }
2089
2090 /* Print the instruction mnemonic name. */
2091
2092 static void
2093 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2094 {
2095 if (inst->opcode->flags & F_COND)
2096 {
2097 /* For instructions that are truly conditionally executed, e.g. b.cond,
2098 prepare the full mnemonic name with the corresponding condition
2099 suffix. */
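/* For instance, combining the conditional branch mnemonic with the EQ
   condition yields the printed name "b.eq".  */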
2100 char name[8], *ptr;
2101 size_t len;
2102
2103 ptr = strchr (inst->opcode->name, '.');
2104 assert (ptr && inst->cond);
2105 len = ptr - inst->opcode->name;
2106 assert (len < 8);
2107 strncpy (name, inst->opcode->name, len);
2108 name [len] = '\0';
2109 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2110 }
2111 else
2112 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2113 }
2114
2115 /* Print the instruction according to *INST. */
2116
2117 static void
2118 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2119 struct disassemble_info *info)
2120 {
2121 print_mnemonic_name (inst, info);
2122 print_operands (pc, inst->opcode, inst->operands, info);
2123 }
2124
2125 /* Entry-point of the instruction disassembler and printer. */
2126
2127 static void
2128 print_insn_aarch64_word (bfd_vma pc,
2129 uint32_t word,
2130 struct disassemble_info *info)
2131 {
2132 static const char *err_msg[6] =
2133 {
2134 [ERR_OK] = "_",
2135 [-ERR_UND] = "undefined",
2136 [-ERR_UNP] = "unpredictable",
2137 [-ERR_NYI] = "NYI"
2138 };
2139
2140 int ret;
2141 aarch64_inst inst;
2142
2143 info->insn_info_valid = 1;
2144 info->branch_delay_insns = 0;
2145 info->data_size = 0;
2146 info->target = 0;
2147 info->target2 = 0;
2148
2149 if (info->flags & INSN_HAS_RELOC)
2150 /* If the instruction has a reloc associated with it, then
2151 the offset field in the instruction will actually be the
2152 addend for the reloc. (If we are using REL type relocs).
2153 In such cases, we can ignore the pc when computing
2154 addresses, since the addend is not currently pc-relative. */
2155 pc = 0;
2156
2157 ret = disas_aarch64_insn (pc, word, &inst);
2158
2159 if (((word >> 21) & 0x3ff) == 1)
2160 {
2161 /* RESERVED for ALES. */
2162 assert (ret != ERR_OK);
2163 ret = ERR_NYI;
2164 }
2165
2166 switch (ret)
2167 {
2168 case ERR_UND:
2169 case ERR_UNP:
2170 case ERR_NYI:
2171 /* Handle undefined instructions. */
2172 info->insn_type = dis_noninsn;
2173 (*info->fprintf_func) (info->stream, ".inst\t0x%08x ; %s",
2174 word, err_msg[-ret]);
2175 break;
2176 case ERR_OK:
2177 user_friendly_fixup (&inst);
2178 print_aarch64_insn (pc, &inst, info);
2179 break;
2180 default:
2181 abort ();
2182 }
2183 }
2184
2185 /* Disallow mapping symbols ($x, $d, etc.) from
2186 being displayed in symbol-relative addresses. */
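/* For example, names such as "$x", "$d", "$x.foo" and "$d.1" are treated as
   mapping symbols and rejected, whereas ordinary symbols such as "$xlen" or
   "main" remain valid for use in symbol-relative addresses.  */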
2187
2188 bfd_boolean
2189 aarch64_symbol_is_valid (asymbol * sym,
2190 struct disassemble_info * info ATTRIBUTE_UNUSED)
2191 {
2192 const char * name;
2193
2194 if (sym == NULL)
2195 return FALSE;
2196
2197 name = bfd_asymbol_name (sym);
2198
2199 return name
2200 && (name[0] != '$'
2201 || (name[1] != 'x' && name[1] != 'd')
2202 || (name[2] != '\0' && name[2] != '.'));
2203 }
2204
2205 /* Print data bytes on INFO->STREAM. */
2206
2207 static void
2208 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
2209 uint32_t word,
2210 struct disassemble_info *info)
2211 {
2212 switch (info->bytes_per_chunk)
2213 {
2214 case 1:
2215 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
2216 break;
2217 case 2:
2218 info->fprintf_func (info->stream, ".short\t0x%04x", word);
2219 break;
2220 case 4:
2221 info->fprintf_func (info->stream, ".word\t0x%08x", word);
2222 break;
2223 default:
2224 abort ();
2225 }
2226 }
2227
2228 /* Try to infer the code or data type from a symbol.
2229 Returns nonzero if *MAP_TYPE was set. */
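/* For example, an STT_FUNC symbol or a "$x" mapping symbol yields MAP_INSN,
   while "$d" (or "$d.<suffix>") yields MAP_DATA.  */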
2230
2231 static int
2232 get_sym_code_type (struct disassemble_info *info, int n,
2233 enum map_type *map_type)
2234 {
2235 elf_symbol_type *es;
2236 unsigned int type;
2237 const char *name;
2238
2239 es = *(elf_symbol_type **)(info->symtab + n);
2240 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
2241
2242 /* If the symbol has function type then use that. */
2243 if (type == STT_FUNC)
2244 {
2245 *map_type = MAP_INSN;
2246 return TRUE;
2247 }
2248
2249 /* Check for mapping symbols. */
2250 name = bfd_asymbol_name(info->symtab[n]);
2251 if (name[0] == '$'
2252 && (name[1] == 'x' || name[1] == 'd')
2253 && (name[2] == '\0' || name[2] == '.'))
2254 {
2255 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
2256 return TRUE;
2257 }
2258
2259 return FALSE;
2260 }
2261
2262 /* Entry-point of the AArch64 disassembler. */
2263
2264 int
2265 print_insn_aarch64 (bfd_vma pc,
2266 struct disassemble_info *info)
2267 {
2268 bfd_byte buffer[INSNLEN];
2269 int status;
2270 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
2271 bfd_boolean found = FALSE;
2272 unsigned int size = 4;
2273 unsigned long data;
2274
2275 if (info->disassembler_options)
2276 {
2277 set_default_aarch64_dis_options (info);
2278
2279 parse_aarch64_dis_options (info->disassembler_options);
2280
2281 /* To avoid repeated parsing of these options, we remove them here. */
2282 info->disassembler_options = NULL;
2283 }
2284
2285 /* AArch64 instructions are always little-endian. */
2286 info->endian_code = BFD_ENDIAN_LITTLE;
2287
2288 /* First check the full symtab for a mapping symbol, even if there
2289 are no usable non-mapping symbols for this address. */
2290 if (info->symtab_size != 0
2291 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
2292 {
2293 enum map_type type = MAP_INSN;
2294 int last_sym = -1;
2295 bfd_vma addr;
2296 int n;
2297
2298 if (pc <= last_mapping_addr)
2299 last_mapping_sym = -1;
2300
2301 /* Start scanning at the start of the function, or wherever
2302 we finished last time. */
2303 n = info->symtab_pos + 1;
2304 if (n < last_mapping_sym)
2305 n = last_mapping_sym;
2306
2307 /* Scan up to the location being disassembled. */
2308 for (; n < info->symtab_size; n++)
2309 {
2310 addr = bfd_asymbol_value (info->symtab[n]);
2311 if (addr > pc)
2312 break;
2313 if ((info->section == NULL
2314 || info->section == info->symtab[n]->section)
2315 && get_sym_code_type (info, n, &type))
2316 {
2317 last_sym = n;
2318 found = TRUE;
2319 }
2320 }
2321
2322 if (!found)
2323 {
2324 n = info->symtab_pos;
2325 if (n < last_mapping_sym)
2326 n = last_mapping_sym;
2327
2328 /* No mapping symbol found at this address. Look backwards
2329 for a preceding one. */
2330 for (; n >= 0; n--)
2331 {
2332 if (get_sym_code_type (info, n, &type))
2333 {
2334 last_sym = n;
2335 found = TRUE;
2336 break;
2337 }
2338 }
2339 }
2340
2341 last_mapping_sym = last_sym;
2342 last_type = type;
2343
2344 /* Look a little bit ahead to see if we should print out
2345 less than four bytes of data. If there's a symbol,
2346 mapping or otherwise, after two bytes then don't
2347 print more. */
2348 if (last_type == MAP_DATA)
2349 {
2350 size = 4 - (pc & 3);
2351 for (n = last_sym + 1; n < info->symtab_size; n++)
2352 {
2353 addr = bfd_asymbol_value (info->symtab[n]);
2354 if (addr > pc)
2355 {
2356 if (addr - pc < size)
2357 size = addr - pc;
2358 break;
2359 }
2360 }
2361 /* If the next symbol is after three bytes, we need to
2362 print only part of the data, so that we can use either
2363 .byte or .short. */
2364 if (size == 3)
2365 size = (pc & 1) ? 1 : 2;
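/* Worked example: at a 4-byte-aligned PC with the next symbol 3 bytes
   away, size becomes 3 and is reduced to 2 here, so a .short is printed;
   the next call then sees 1 remaining byte and prints a .byte.  */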
2366 }
2367 }
2368
2369 if (last_type == MAP_DATA)
2370 {
2371 /* size was set above. */
2372 info->bytes_per_chunk = size;
2373 info->display_endian = info->endian;
2374 printer = print_insn_data;
2375 }
2376 else
2377 {
2378 info->bytes_per_chunk = size = INSNLEN;
2379 info->display_endian = info->endian_code;
2380 printer = print_insn_aarch64_word;
2381 }
2382
2383 status = (*info->read_memory_func) (pc, buffer, size, info);
2384 if (status != 0)
2385 {
2386 (*info->memory_error_func) (status, pc, info);
2387 return -1;
2388 }
2389
2390 data = bfd_get_bits (buffer, size * 8,
2391 info->display_endian == BFD_ENDIAN_BIG);
2392
2393 (*printer) (pc, data, info);
2394
2395 return size;
2396 }
2397 \f
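/* These options are passed to the disassembler via the -M switch, e.g.
   "objdump -d -M no-aliases" (an illustrative invocation; the options end
   up in info->disassembler_options and are parsed above).  */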
2398 void
2399 print_aarch64_disassembler_options (FILE *stream)
2400 {
2401 fprintf (stream, _("\n\
2402 The following AARCH64 specific disassembler options are supported for use\n\
2403 with the -M switch (multiple options should be separated by commas):\n"));
2404
2405 fprintf (stream, _("\n\
2406 no-aliases Don't print instruction aliases.\n"));
2407
2408 fprintf (stream, _("\n\
2409 aliases Do print instruction aliases.\n"));
2410
2411 #ifdef DEBUG_AARCH64
2412 fprintf (stream, _("\n\
2413 debug_dump Temp switch for debug trace.\n"));
2414 #endif /* DEBUG_AARCH64 */
2415
2416 fprintf (stream, _("\n"));
2417 }