]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - opcodes/aarch64-dis.c
New Romanian translation for ld
[thirdparty/binutils-gdb.git] / opcodes / aarch64-dis.c
1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2023 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdint.h>
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28 #include "safe-ctype.h"
29 #include "obstack.h"
30
/* Obstack allocation hooks used by this file.  */
#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free

/* AArch64 instructions are fixed-width; length in bytes.  */
#define INSNLEN 4

/* This character is used to encode style information within the output
   buffers.  See get_style_text and print_operands for more details.  */
#define STYLE_MARKER_CHAR '\002'

/* Cached mapping symbol state.  */
enum map_type
{
  MAP_INSN,	/* Most recent mapping symbol marked instructions.  */
  MAP_DATA	/* Most recent mapping symbol marked data.  */
};

static aarch64_feature_set arch_variant; /* See select_aarch64_variant.  */
static enum map_type last_type;
static int last_mapping_sym = -1;
static bfd_vma last_stop_offset = 0;
static bfd_vma last_mapping_addr = 0;

/* Other options */
static int no_aliases = 0;	/* If set disassemble as most general inst.  */
static int no_notes = 1;	/* If set do not print disassemble notes in the
				   output as comments.  */

/* Currently active instruction sequence.  */
static aarch64_instr_sequence insn_sequence;
60
/* Reset disassembler options to their defaults.  Currently a no-op;
   kept as an extension point called before option parsing.  */
static void
set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
{
}
65
66 static void
67 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
68 {
69 /* Try to match options that are simple flags */
70 if (startswith (option, "no-aliases"))
71 {
72 no_aliases = 1;
73 return;
74 }
75
76 if (startswith (option, "aliases"))
77 {
78 no_aliases = 0;
79 return;
80 }
81
82 if (startswith (option, "no-notes"))
83 {
84 no_notes = 1;
85 return;
86 }
87
88 if (startswith (option, "notes"))
89 {
90 no_notes = 0;
91 return;
92 }
93
94 #ifdef DEBUG_AARCH64
95 if (startswith (option, "debug_dump"))
96 {
97 debug_dump = 1;
98 return;
99 }
100 #endif /* DEBUG_AARCH64 */
101
102 /* Invalid option. */
103 opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
104 }
105
106 static void
107 parse_aarch64_dis_options (const char *options)
108 {
109 const char *option_end;
110
111 if (options == NULL)
112 return;
113
114 while (*options != '\0')
115 {
116 /* Skip empty options. */
117 if (*options == ',')
118 {
119 options++;
120 continue;
121 }
122
123 /* We know that *options is neither NUL or a comma. */
124 option_end = options + 1;
125 while (*option_end != ',' && *option_end != '\0')
126 option_end++;
127
128 parse_aarch64_dis_option (options, option_end - options);
129
130 /* Go on to the next one. If option_end points to a comma, it
131 will be skipped above. */
132 options = option_end;
133 }
134 }
135 \f
136 /* Functions doing the instruction disassembling. */
137
138 /* The unnamed arguments consist of the number of fields and information about
139 these fields where the VALUE will be extracted from CODE and returned.
140 MASK can be zero or the base mask of the opcode.
141
142 N.B. the fields are required to be in such an order than the most signficant
143 field for VALUE comes the first, e.g. the <index> in
144 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
145 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
146 the order of H, L, M. */
147
148 aarch64_insn
149 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
150 {
151 uint32_t num;
152 const aarch64_field *field;
153 enum aarch64_field_kind kind;
154 va_list va;
155
156 va_start (va, mask);
157 num = va_arg (va, uint32_t);
158 assert (num <= 5);
159 aarch64_insn value = 0x0;
160 while (num--)
161 {
162 kind = va_arg (va, enum aarch64_field_kind);
163 field = &fields[kind];
164 value <<= field->width;
165 value |= extract_field (kind, code, mask);
166 }
167 va_end (va);
168 return value;
169 }
170
171 /* Extract the value of all fields in SELF->fields after START from
172 instruction CODE. The least significant bit comes from the final field. */
173
174 static aarch64_insn
175 extract_all_fields_after (const aarch64_operand *self, unsigned int start,
176 aarch64_insn code)
177 {
178 aarch64_insn value;
179 unsigned int i;
180 enum aarch64_field_kind kind;
181
182 value = 0;
183 for (i = start;
184 i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
185 {
186 kind = self->fields[i];
187 value <<= fields[kind].width;
188 value |= extract_field (kind, code, 0);
189 }
190 return value;
191 }
192
/* Extract the value of all fields in SELF->fields from instruction CODE.
   The least significant bit comes from the final field.  Convenience
   wrapper around extract_all_fields_after with START == 0.  */

static aarch64_insn
extract_all_fields (const aarch64_operand *self, aarch64_insn code)
{
  return extract_all_fields_after (self, 0, code);
}
201
202 /* Sign-extend bit I of VALUE. */
203 static inline uint64_t
204 sign_extend (aarch64_insn value, unsigned i)
205 {
206 uint64_t ret, sign;
207
208 assert (i < 32);
209 ret = value;
210 sign = (uint64_t) 1 << i;
211 return ((ret & (sign + sign - 1)) ^ sign) - sign;
212 }
213
/* N.B. the following inline helper functions create a dependency on the
   order of operand qualifier enumerators.  */
216
217 /* Given VALUE, return qualifier for a general purpose register. */
218 static inline enum aarch64_opnd_qualifier
219 get_greg_qualifier_from_value (aarch64_insn value)
220 {
221 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
222 assert (value <= 0x1
223 && aarch64_get_qualifier_standard_value (qualifier) == value);
224 return qualifier;
225 }
226
227 /* Given VALUE, return qualifier for a vector register. This does not support
228 decoding instructions that accept the 2H vector type. */
229
230 static inline enum aarch64_opnd_qualifier
231 get_vreg_qualifier_from_value (aarch64_insn value)
232 {
233 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
234
235 /* Instructions using vector type 2H should not call this function. Skip over
236 the 2H qualifier. */
237 if (qualifier >= AARCH64_OPND_QLF_V_2H)
238 qualifier += 1;
239
240 assert (value <= 0x8
241 && aarch64_get_qualifier_standard_value (qualifier) == value);
242 return qualifier;
243 }
244
245 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
246 static inline enum aarch64_opnd_qualifier
247 get_sreg_qualifier_from_value (aarch64_insn value)
248 {
249 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
250
251 assert (value <= 0x4
252 && aarch64_get_qualifier_standard_value (qualifier) == value);
253 return qualifier;
254 }
255
256 /* Given the instruction in *INST which is probably half way through the
257 decoding and our caller wants to know the expected qualifier for operand
258 I. Return such a qualifier if we can establish it; otherwise return
259 AARCH64_OPND_QLF_NIL. */
260
261 static aarch64_opnd_qualifier_t
262 get_expected_qualifier (const aarch64_inst *inst, int i)
263 {
264 aarch64_opnd_qualifier_seq_t qualifiers;
265 /* Should not be called if the qualifier is known. */
266 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
267 int invalid_count;
268 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
269 i, qualifiers, &invalid_count))
270 return qualifiers[i];
271 else
272 return AARCH64_OPND_QLF_NIL;
273 }
274
275 /* Operand extractors. */
276
/* Extractor for operands that encode nothing; trivially succeeds.  */
bool
aarch64_ext_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  aarch64_opnd_info *info ATTRIBUTE_UNUSED,
		  const aarch64_insn code ATTRIBUTE_UNUSED,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return true;
}
286
287 bool
288 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
289 const aarch64_insn code,
290 const aarch64_inst *inst ATTRIBUTE_UNUSED,
291 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
292 {
293 info->reg.regno = (extract_field (self->fields[0], code, 0)
294 + get_operand_specific_data (self));
295 return true;
296 }
297
298 bool
299 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
300 const aarch64_insn code ATTRIBUTE_UNUSED,
301 const aarch64_inst *inst ATTRIBUTE_UNUSED,
302 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
303 {
304 assert (info->idx == 1
305 || info->idx ==3);
306 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
307 return true;
308 }
309
310 /* e.g. IC <ic_op>{, <Xt>}. */
311 bool
312 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
313 const aarch64_insn code,
314 const aarch64_inst *inst ATTRIBUTE_UNUSED,
315 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
316 {
317 info->reg.regno = extract_field (self->fields[0], code, 0);
318 assert (info->idx == 1
319 && (aarch64_get_operand_class (inst->operands[0].type)
320 == AARCH64_OPND_CLASS_SYSTEM));
321 /* This will make the constraint checking happy and more importantly will
322 help the disassembler determine whether this operand is optional or
323 not. */
324 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
325
326 return true;
327 }
328
/* Decode a vector register plus element index, e.g.
   SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].
   The index encoding varies by instruction class; see each branch.  */
bool
aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
		     const aarch64_insn code,
		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  info->reglane.regno = extract_field (self->fields[0], code,
				       inst->opcode->mask);

  /* Index and/or type.  */
  if (inst->opcode->iclass == asisdone
      || inst->opcode->iclass == asimdins)
    {
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  unsigned shift;
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = extract_field (FLD_imm4_11, code, 0);
	  /* Depend on AARCH64_OPND_Ed to determine the qualifier.  */
	  info->qualifier = get_expected_qualifier (inst, info->idx);
	  /* The index occupies the bits of imm4 above log2(element size).  */
	  shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
	  info->reglane.index = value >> shift;
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D  */
	  int pos = -1;
	  aarch64_insn value = extract_field (FLD_imm5, code, 0);
	  /* The position of the lowest set bit of imm5 selects the element
	     size; the bits above it hold the index.  */
	  while (++pos <= 3 && (value & 0x1) == 0)
	    value >>= 1;
	  if (pos > 3)
	    /* imm5<3:0> == 0000 is RESERVED.  */
	    return false;
	  info->qualifier = get_sreg_qualifier_from_value (pos);
	  info->reglane.index = (unsigned) (value >> 1);
	}
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      /* Need information in other operand(s) to help decoding.  */
      info->qualifier = get_expected_qualifier (inst, info->idx);
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_4B:
	case AARCH64_OPND_QLF_S_2H:
	  /* L:H */
	  info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
	  /* Only a 5-bit register number is available here.  */
	  info->reglane.regno &= 0x1f;
	  break;
	default:
	  return false;
	}
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>].  */
      info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
    }
  else
    {
      /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
	 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */

      /* Need information in other operand(s) to help decoding.  */
      info->qualifier = get_expected_qualifier (inst, info->idx);
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  if (info->type == AARCH64_OPND_Em16)
	    {
	      /* h:l:m */
	      info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
						    FLD_M);
	      /* Em16 is restricted to the lower 16 registers.  */
	      info->reglane.regno &= 0xf;
	    }
	  else
	    {
	      /* h:l */
	      info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
	    }
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* h:l */
	  info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  info->reglane.index = extract_field (FLD_H, code, 0);
	  break;
	default:
	  return false;
	}

      if (inst->opcode->op == OP_FCMLA_ELEM
	  && info->qualifier != AARCH64_OPND_QLF_S_H)
	{
	  /* Complex operand takes two elements, so the encoded index must
	     be even and is halved.  */
	  if (info->reglane.index & 1)
	    return false;
	  info->reglane.index /= 2;
	}
    }

  return true;
}
443
444 bool
445 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
446 const aarch64_insn code,
447 const aarch64_inst *inst ATTRIBUTE_UNUSED,
448 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
449 {
450 /* R */
451 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
452 /* len */
453 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
454 info->reglist.stride = 1;
455 return true;
456 }
457
458 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
459 bool
460 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
461 aarch64_opnd_info *info, const aarch64_insn code,
462 const aarch64_inst *inst,
463 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
464 {
465 aarch64_insn value;
466 /* Number of elements in each structure to be loaded/stored. */
467 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
468
469 struct
470 {
471 unsigned is_reserved;
472 unsigned num_regs;
473 unsigned num_elements;
474 } data [] =
475 { {0, 4, 4},
476 {1, 4, 4},
477 {0, 4, 1},
478 {0, 4, 2},
479 {0, 3, 3},
480 {1, 3, 3},
481 {0, 3, 1},
482 {0, 1, 1},
483 {0, 2, 2},
484 {1, 2, 2},
485 {0, 2, 1},
486 };
487
488 /* Rt */
489 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
490 /* opcode */
491 value = extract_field (FLD_opcode, code, 0);
492 /* PR 21595: Check for a bogus value. */
493 if (value >= ARRAY_SIZE (data))
494 return false;
495 if (expected_num != data[value].num_elements || data[value].is_reserved)
496 return false;
497 info->reglist.num_regs = data[value].num_regs;
498 info->reglist.stride = 1;
499
500 return true;
501 }
502
503 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
504 lanes instructions. */
505 bool
506 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
507 aarch64_opnd_info *info, const aarch64_insn code,
508 const aarch64_inst *inst,
509 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
510 {
511 aarch64_insn value;
512
513 /* Rt */
514 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
515 /* S */
516 value = extract_field (FLD_S, code, 0);
517
518 /* Number of registers is equal to the number of elements in
519 each structure to be loaded/stored. */
520 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
521 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
522
523 /* Except when it is LD1R. */
524 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
525 info->reglist.num_regs = 2;
526
527 info->reglist.stride = 1;
528 return true;
529 }
530
531 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
532 load/store single element instructions. */
533 bool
534 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
535 aarch64_opnd_info *info, const aarch64_insn code,
536 const aarch64_inst *inst ATTRIBUTE_UNUSED,
537 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
538 {
539 aarch64_field field = {0, 0};
540 aarch64_insn QSsize; /* fields Q:S:size. */
541 aarch64_insn opcodeh2; /* opcode<2:1> */
542
543 /* Rt */
544 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
545
546 /* Decode the index, opcode<2:1> and size. */
547 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
548 opcodeh2 = extract_field_2 (&field, code, 0);
549 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
550 switch (opcodeh2)
551 {
552 case 0x0:
553 info->qualifier = AARCH64_OPND_QLF_S_B;
554 /* Index encoded in "Q:S:size". */
555 info->reglist.index = QSsize;
556 break;
557 case 0x1:
558 if (QSsize & 0x1)
559 /* UND. */
560 return false;
561 info->qualifier = AARCH64_OPND_QLF_S_H;
562 /* Index encoded in "Q:S:size<1>". */
563 info->reglist.index = QSsize >> 1;
564 break;
565 case 0x2:
566 if ((QSsize >> 1) & 0x1)
567 /* UND. */
568 return false;
569 if ((QSsize & 0x1) == 0)
570 {
571 info->qualifier = AARCH64_OPND_QLF_S_S;
572 /* Index encoded in "Q:S". */
573 info->reglist.index = QSsize >> 2;
574 }
575 else
576 {
577 if (extract_field (FLD_S, code, 0))
578 /* UND */
579 return false;
580 info->qualifier = AARCH64_OPND_QLF_S_D;
581 /* Index encoded in "Q". */
582 info->reglist.index = QSsize >> 3;
583 }
584 break;
585 default:
586 return false;
587 }
588
589 info->reglist.has_index = 1;
590 info->reglist.num_regs = 0;
591 info->reglist.stride = 1;
592 /* Number of registers is equal to the number of elements in
593 each structure to be loaded/stored. */
594 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
595 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
596
597 return true;
598 }
599
/* Decode fields immh:immb and/or Q for e.g.
   SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.  */

bool
aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       aarch64_opnd_info *info, const aarch64_insn code,
			       const aarch64_inst *inst,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int pos;
  aarch64_insn Q, imm, immh;
  enum aarch64_insn_class iclass = inst->opcode->iclass;

  immh = extract_field (FLD_immh, code, 0);
  if (immh == 0)
    /* immh == 0000 belongs to the AdvSIMD modified immediate space.  */
    return false;
  imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
  pos = 4;
  /* Get highest set bit in immh; afterwards POS is its bit index
     (0..3), which selects the element size below.  */
  while (--pos >= 0 && (immh & 0x8) == 0)
    immh <<= 1;

  assert ((iclass == asimdshf || iclass == asisdshf)
	  && (info->type == AARCH64_OPND_IMM_VLSR
	      || info->type == AARCH64_OPND_IMM_VLSL));

  if (iclass == asimdshf)
    {
      Q = extract_field (FLD_Q, code, 0);
      /* immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      info->qualifier =
	get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
    }
  else
    info->qualifier = get_sreg_qualifier_from_value (pos);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    info->imm.value = (16 << pos) - imm;
  else
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    info->imm.value = imm - (8 << pos);

  return true;
}
666
667 /* Decode shift immediate for e.g. sshr (imm). */
668 bool
669 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
670 aarch64_opnd_info *info, const aarch64_insn code,
671 const aarch64_inst *inst ATTRIBUTE_UNUSED,
672 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
673 {
674 int64_t imm;
675 aarch64_insn val;
676 val = extract_field (FLD_size, code, 0);
677 switch (val)
678 {
679 case 0: imm = 8; break;
680 case 1: imm = 16; break;
681 case 2: imm = 32; break;
682 default: return false;
683 }
684 info->imm.value = imm;
685 return true;
686 }
687
688 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
689 value in the field(s) will be extracted as unsigned immediate value. */
690 bool
691 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
692 const aarch64_insn code,
693 const aarch64_inst *inst,
694 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
695 {
696 uint64_t imm;
697
698 imm = extract_all_fields (self, code);
699
700 if (operand_need_sign_extension (self))
701 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
702
703 if (operand_need_shift_by_two (self))
704 imm <<= 2;
705 else if (operand_need_shift_by_three (self))
706 imm <<= 3;
707 else if (operand_need_shift_by_four (self))
708 imm <<= 4;
709
710 if (info->type == AARCH64_OPND_ADDR_ADRP)
711 imm <<= 12;
712
713 if (inst->operands[0].type == AARCH64_OPND_PSTATEFIELD
714 && inst->operands[0].sysreg.flags & F_IMM_IN_CRM)
715 imm &= PSTATE_DECODE_CRM_IMM (inst->operands[0].sysreg.flags);
716
717 info->imm.value = imm;
718 return true;
719 }
720
721 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
722 bool
723 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
724 const aarch64_insn code,
725 const aarch64_inst *inst ATTRIBUTE_UNUSED,
726 aarch64_operand_error *errors)
727 {
728 aarch64_ext_imm (self, info, code, inst, errors);
729 info->shifter.kind = AARCH64_MOD_LSL;
730 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
731 return true;
732 }
733
734 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
735 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
736 bool
737 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
738 aarch64_opnd_info *info,
739 const aarch64_insn code,
740 const aarch64_inst *inst ATTRIBUTE_UNUSED,
741 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
742 {
743 uint64_t imm;
744 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
745 aarch64_field field = {0, 0};
746
747 assert (info->idx == 1);
748
749 if (info->type == AARCH64_OPND_SIMD_FPIMM)
750 info->imm.is_fp = 1;
751
752 /* a:b:c:d:e:f:g:h */
753 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
754 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
755 {
756 /* Either MOVI <Dd>, #<imm>
757 or MOVI <Vd>.2D, #<imm>.
758 <imm> is a 64-bit immediate
759 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
760 encoded in "a:b:c:d:e:f:g:h". */
761 int i;
762 unsigned abcdefgh = imm;
763 for (imm = 0ull, i = 0; i < 8; i++)
764 if (((abcdefgh >> i) & 0x1) != 0)
765 imm |= 0xffull << (8 * i);
766 }
767 info->imm.value = imm;
768
769 /* cmode */
770 info->qualifier = get_expected_qualifier (inst, info->idx);
771 switch (info->qualifier)
772 {
773 case AARCH64_OPND_QLF_NIL:
774 /* no shift */
775 info->shifter.kind = AARCH64_MOD_NONE;
776 return 1;
777 case AARCH64_OPND_QLF_LSL:
778 /* shift zeros */
779 info->shifter.kind = AARCH64_MOD_LSL;
780 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
781 {
782 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
783 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
784 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
785 default: return false;
786 }
787 /* 00: 0; 01: 8; 10:16; 11:24. */
788 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
789 break;
790 case AARCH64_OPND_QLF_MSL:
791 /* shift ones */
792 info->shifter.kind = AARCH64_MOD_MSL;
793 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
794 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
795 break;
796 default:
797 return false;
798 }
799
800 return true;
801 }
802
803 /* Decode an 8-bit floating-point immediate. */
804 bool
805 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
806 const aarch64_insn code,
807 const aarch64_inst *inst ATTRIBUTE_UNUSED,
808 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
809 {
810 info->imm.value = extract_all_fields (self, code);
811 info->imm.is_fp = 1;
812 return true;
813 }
814
815 /* Decode a 1-bit rotate immediate (#90 or #270). */
816 bool
817 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
818 const aarch64_insn code,
819 const aarch64_inst *inst ATTRIBUTE_UNUSED,
820 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
821 {
822 uint64_t rot = extract_field (self->fields[0], code, 0);
823 assert (rot < 2U);
824 info->imm.value = rot * 180 + 90;
825 return true;
826 }
827
828 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
829 bool
830 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
831 const aarch64_insn code,
832 const aarch64_inst *inst ATTRIBUTE_UNUSED,
833 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
834 {
835 uint64_t rot = extract_field (self->fields[0], code, 0);
836 assert (rot < 4U);
837 info->imm.value = rot * 90;
838 return true;
839 }
840
841 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
842 bool
843 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
844 aarch64_opnd_info *info, const aarch64_insn code,
845 const aarch64_inst *inst ATTRIBUTE_UNUSED,
846 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
847 {
848 info->imm.value = 64- extract_field (FLD_scale, code, 0);
849 return true;
850 }
851
852 /* Decode arithmetic immediate for e.g.
853 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
854 bool
855 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
856 aarch64_opnd_info *info, const aarch64_insn code,
857 const aarch64_inst *inst ATTRIBUTE_UNUSED,
858 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
859 {
860 aarch64_insn value;
861
862 info->shifter.kind = AARCH64_MOD_LSL;
863 /* shift */
864 value = extract_field (FLD_shift, code, 0);
865 if (value >= 2)
866 return false;
867 info->shifter.amount = value ? 12 : 0;
868 /* imm12 (unsigned) */
869 info->imm.value = extract_field (FLD_imm12, code, 0);
870
871 return true;
872 }
873
874 /* Return true if VALUE is a valid logical immediate encoding, storing the
875 decoded value in *RESULT if so. ESIZE is the number of bytes in the
876 decoded immediate. */
877 static bool
878 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
879 {
880 uint64_t imm, mask;
881 uint32_t N, R, S;
882 unsigned simd_size;
883
884 /* value is N:immr:imms. */
885 S = value & 0x3f;
886 R = (value >> 6) & 0x3f;
887 N = (value >> 12) & 0x1;
888
889 /* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
890 (in other words, right rotated by R), then replicated. */
891 if (N != 0)
892 {
893 simd_size = 64;
894 mask = 0xffffffffffffffffull;
895 }
896 else
897 {
898 switch (S)
899 {
900 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
901 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
902 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
903 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
904 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
905 default: return false;
906 }
907 mask = (1ull << simd_size) - 1;
908 /* Top bits are IGNORED. */
909 R &= simd_size - 1;
910 }
911
912 if (simd_size > esize * 8)
913 return false;
914
915 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
916 if (S == simd_size - 1)
917 return false;
918 /* S+1 consecutive bits to 1. */
919 /* NOTE: S can't be 63 due to detection above. */
920 imm = (1ull << (S + 1)) - 1;
921 /* Rotate to the left by simd_size - R. */
922 if (R != 0)
923 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
924 /* Replicate the value according to SIMD size. */
925 switch (simd_size)
926 {
927 case 2: imm = (imm << 2) | imm;
928 /* Fall through. */
929 case 4: imm = (imm << 4) | imm;
930 /* Fall through. */
931 case 8: imm = (imm << 8) | imm;
932 /* Fall through. */
933 case 16: imm = (imm << 16) | imm;
934 /* Fall through. */
935 case 32: imm = (imm << 32) | imm;
936 /* Fall through. */
937 case 64: break;
938 default: return 0;
939 }
940
941 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
942
943 return true;
944 }
945
946 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
947 bool
948 aarch64_ext_limm (const aarch64_operand *self,
949 aarch64_opnd_info *info, const aarch64_insn code,
950 const aarch64_inst *inst,
951 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
952 {
953 uint32_t esize;
954 aarch64_insn value;
955
956 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
957 self->fields[2]);
958 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
959 return decode_limm (esize, value, &info->imm.value);
960 }
961
962 /* Decode a logical immediate for the BIC alias of AND (etc.). */
963 bool
964 aarch64_ext_inv_limm (const aarch64_operand *self,
965 aarch64_opnd_info *info, const aarch64_insn code,
966 const aarch64_inst *inst,
967 aarch64_operand_error *errors)
968 {
969 if (!aarch64_ext_limm (self, info, code, inst, errors))
970 return false;
971 info->imm.value = ~info->imm.value;
972 return true;
973 }
974
975 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
976 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
977 bool
978 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
979 aarch64_opnd_info *info,
980 const aarch64_insn code, const aarch64_inst *inst,
981 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
982 {
983 aarch64_insn value;
984
985 /* Rt */
986 info->reg.regno = extract_field (FLD_Rt, code, 0);
987
988 /* size */
989 value = extract_field (FLD_ldst_size, code, 0);
990 if (inst->opcode->iclass == ldstpair_indexed
991 || inst->opcode->iclass == ldstnapair_offs
992 || inst->opcode->iclass == ldstpair_off
993 || inst->opcode->iclass == loadlit)
994 {
995 enum aarch64_opnd_qualifier qualifier;
996 switch (value)
997 {
998 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
999 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1000 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
1001 default: return false;
1002 }
1003 info->qualifier = qualifier;
1004 }
1005 else
1006 {
1007 /* opc1:size */
1008 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
1009 if (value > 0x4)
1010 return false;
1011 info->qualifier = get_sreg_qualifier_from_value (value);
1012 }
1013
1014 return true;
1015 }
1016
/* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].
   Only the base register Rn is encoded; no offset or writeback.  */
bool
aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 aarch64_opnd_info *info,
			 aarch64_insn code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  return true;
}
1029
1030 /* Decode the address operand for e.g.
1031 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
1032 bool
1033 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1034 aarch64_opnd_info *info,
1035 aarch64_insn code, const aarch64_inst *inst,
1036 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1037 {
1038 info->qualifier = get_expected_qualifier (inst, info->idx);
1039
1040 /* Rn */
1041 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1042
1043 /* simm9 */
1044 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1045 info->addr.offset.imm = sign_extend (imm, 8);
1046 if (extract_field (self->fields[2], code, 0) == 1) {
1047 info->addr.writeback = 1;
1048 info->addr.preind = 1;
1049 }
1050 return true;
1051 }
1052
/* Decode the address operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].
   Decodes base register, offset register, the extend/shift modifier and
   the optional shift amount (derived from the accessed element size).  */
bool
aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 aarch64_opnd_info *info,
			 aarch64_insn code, const aarch64_inst *inst,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn S, value;

  /* Rn */
  info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  /* Rm */
  info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
  /* option */
  value = extract_field (FLD_option, code, 0);
  info->shifter.kind =
    aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
  /* Fix-up the shifter kind; although the table-driven approach is
     efficient, it is slightly inflexible, thus needing this fix-up.  */
  if (info->shifter.kind == AARCH64_MOD_UXTX)
    info->shifter.kind = AARCH64_MOD_LSL;
  /* S */
  S = extract_field (FLD_S, code, 0);
  if (S == 0)
    {
      /* No amount: print neither a shift amount nor the modifier's
	 amount field.  */
      info->shifter.amount = 0;
      info->shifter.amount_present = 0;
    }
  else
    {
      int size;
      /* Need information in other operand(s) to help achieve the decoding
	 from 'S' field.  */
      info->qualifier = get_expected_qualifier (inst, info->idx);
      /* Get the size of the data element that is accessed, which may be
	 different from that of the source register size,
	 e.g. in strb/ldrb.  */
      size = aarch64_get_qualifier_esize (info->qualifier);
      /* S == 1 means the offset is scaled by the element size.  */
      info->shifter.amount = get_logsz (size);
      info->shifter.amount_present = 1;
    }

  return true;
}
1097
/* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>.

   fields[0] names the immediate field (imm9 or imm7); its width is looked
   up in the global 'fields' table so the sign extension adapts to the
   field actually used.  fields[1] (when the iclass has writeback) selects
   pre- versus post-indexing.  */
bool
aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
		       aarch64_insn code, const aarch64_inst *inst,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn imm;
  info->qualifier = get_expected_qualifier (inst, info->idx);

  /* Rn */
  info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  /* simm (imm9 or imm7): sign-extend from the field's actual width.  */
  imm = extract_field (self->fields[0], code, 0);
  info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
  /* imm7 (ld/st pair) and tag offsets are stored scaled; multiply by the
     element size to recover the byte offset.  */
  if (self->fields[0] == FLD_imm7
      || info->qualifier == AARCH64_OPND_QLF_imm_tag)
    /* scaled immediate in ld/st pair instructions.  */
    info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
  /* qualifier: these iclasses never write back; everything else using this
     extractor is a pre/post-indexed form.  */
  if (inst->opcode->iclass == ldst_unscaled
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == ldst_unpriv)
    info->addr.writeback = 0;
  else
    {
      /* pre/post- index: fields[1] distinguishes the two.  */
      info->addr.writeback = 1;
      if (extract_field (self->fields[1], code, 0) == 1)
	info->addr.preind = 1;
      else
	info->addr.postind = 1;
    }

  return true;
}
1134
1135 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1136 bool
1137 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1138 aarch64_insn code,
1139 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1140 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1141 {
1142 int shift;
1143 info->qualifier = get_expected_qualifier (inst, info->idx);
1144 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1145 /* Rn */
1146 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1147 /* uimm12 */
1148 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1149 return true;
1150 }
1151
1152 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1153 bool
1154 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1155 aarch64_insn code,
1156 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1157 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1158 {
1159 aarch64_insn imm;
1160
1161 info->qualifier = get_expected_qualifier (inst, info->idx);
1162 /* Rn */
1163 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1164 /* simm10 */
1165 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1166 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1167 if (extract_field (self->fields[3], code, 0) == 1) {
1168 info->addr.writeback = 1;
1169 info->addr.preind = 1;
1170 }
1171 return true;
1172 }
1173
/* Decode the address operand for e.g.
   LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.

   Rm == 31 selects the immediate post-index form, where the amount is the
   total number of bytes transferred; any other Rm is a register offset.  */
bool
aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
			    aarch64_opnd_info *info,
			    aarch64_insn code, const aarch64_inst *inst,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The opcode dependent area stores the number of elements in
     each structure to be loaded/stored.  */
  int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;

  /* Rn */
  info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  /* Rm | #<amount> */
  info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
  if (info->addr.offset.regno == 31)
    {
      if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
	/* Special handling of loading single structure to all lane.
	   Amount is one element (or one per register for LDnR) times the
	   element size.  */
	info->addr.offset.imm = (is_ld1r ? 1
				 : inst->operands[0].reglist.num_regs)
	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
      else
	/* Full structures: registers * element size * elements per
	   register.  */
	info->addr.offset.imm = inst->operands[0].reglist.num_regs
	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
	  * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
    }
  else
    info->addr.offset.is_reg = 1;
  /* This addressing mode always writes back.  */
  info->addr.writeback = 1;

  return true;
}
1208
1209 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1210 bool
1211 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1212 aarch64_opnd_info *info,
1213 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1214 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1215 {
1216 aarch64_insn value;
1217 /* cond */
1218 value = extract_field (FLD_cond, code, 0);
1219 info->cond = get_cond_from_value (value);
1220 return true;
1221 }
1222
1223 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1224 bool
1225 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1226 aarch64_opnd_info *info,
1227 aarch64_insn code,
1228 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1229 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1230 {
1231 /* op0:op1:CRn:CRm:op2 */
1232 info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1233 FLD_CRm, FLD_op2);
1234 info->sysreg.flags = 0;
1235
1236 /* If a system instruction, check which restrictions should be on the register
1237 value during decoding, these will be enforced then. */
1238 if (inst->opcode->iclass == ic_system)
1239 {
1240 /* Check to see if it's read-only, else check if it's write only.
1241 if it's both or unspecified don't care. */
1242 if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1243 info->sysreg.flags = F_REG_READ;
1244 else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1245 == F_SYS_WRITE)
1246 info->sysreg.flags = F_REG_WRITE;
1247 }
1248
1249 return true;
1250 }
1251
/* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.

   The op1:op2 value is looked up in aarch64_pstatefields; entries flagged
   F_REG_IN_CRM additionally require CRm[3:1] to match, since part of the
   field name is encoded there.  Returns false for a reserved encoding.  */
bool
aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 aarch64_opnd_info *info, aarch64_insn code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int i;
  aarch64_insn fld_crm = extract_field (FLD_CRm, code, 0);
  /* op1:op2 */
  info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
      {
	/* PSTATEFIELD name can be encoded partially in CRm[3:1].  */
	uint32_t flags = aarch64_pstatefields[i].flags;
	if ((flags & F_REG_IN_CRM)
	    && ((fld_crm & 0xe) != PSTATE_DECODE_CRM (flags)))
	  continue;
	info->sysreg.flags = flags;
	return true;
      }
  /* Reserved value in <pstatefield>.  */
  return false;
}
1277
/* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>.

   The op0:op1:CRn:CRm:op2 value is matched against the table selected by
   the operand type (AT/DC/IC/TLBI/SR).  Returns false when the value does
   not name a known operation.  */
bool
aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
		       aarch64_opnd_info *info,
		       aarch64_insn code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int i;
  aarch64_insn value;
  const aarch64_sys_ins_reg *sysins_ops;
  /* op0:op1:CRn:CRm:op2 */
  value = extract_fields (code, 0, 5,
			  FLD_op0, FLD_op1, FLD_CRn,
			  FLD_CRm, FLD_op2);

  /* Select the lookup table appropriate to this operand type.  */
  switch (info->type)
    {
    case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
    case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
    case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
    case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
    case AARCH64_OPND_SYSREG_SR:
      sysins_ops = aarch64_sys_regs_sr;
      /* Let's remove op2 for rctx.  Refer to comments in the definition of
	 aarch64_sys_regs_sr[].  */
      value = value & ~(0x7);
      break;
    default: return false;
    }

  /* Linear search of the (NULL-terminated) table for a matching value.  */
  for (i = 0; sysins_ops[i].name != NULL; ++i)
    if (sysins_ops[i].value == value)
      {
	info->sysins_op = sysins_ops + i;
	DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
		     info->sysins_op->name,
		     (unsigned)info->sysins_op->value,
		     aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
	return true;
      }

  return false;
}
1322
1323 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1324
1325 bool
1326 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1327 aarch64_opnd_info *info,
1328 aarch64_insn code,
1329 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1330 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1331 {
1332 /* CRm */
1333 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1334 return true;
1335 }
1336
1337 /* Decode the memory barrier option operand for DSB <option>nXS|#<imm>. */
1338
1339 bool
1340 aarch64_ext_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
1341 aarch64_opnd_info *info,
1342 aarch64_insn code,
1343 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1344 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1345 {
1346 /* For the DSB nXS barrier variant immediate is encoded in 2-bit field. */
1347 aarch64_insn field = extract_field (FLD_CRm_dsb_nxs, code, 0);
1348 info->barrier = aarch64_barrier_dsb_nxs_options + field;
1349 return true;
1350 }
1351
1352 /* Decode the prefetch operation option operand for e.g.
1353 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1354
1355 bool
1356 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1357 aarch64_opnd_info *info,
1358 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1359 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1360 {
1361 /* prfop in Rt */
1362 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1363 return true;
1364 }
1365
1366 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1367 to the matching name/value pair in aarch64_hint_options. */
1368
1369 bool
1370 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1371 aarch64_opnd_info *info,
1372 aarch64_insn code,
1373 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1374 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1375 {
1376 /* CRm:op2. */
1377 unsigned hint_number;
1378 int i;
1379
1380 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1381
1382 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1383 {
1384 if (hint_number == HINT_VAL (aarch64_hint_options[i].value))
1385 {
1386 info->hint_option = &(aarch64_hint_options[i]);
1387 return true;
1388 }
1389 }
1390
1391 return false;
1392 }
1393
1394 /* Decode the extended register operand for e.g.
1395 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1396 bool
1397 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1398 aarch64_opnd_info *info,
1399 aarch64_insn code,
1400 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1401 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1402 {
1403 aarch64_insn value;
1404
1405 /* Rm */
1406 info->reg.regno = extract_field (FLD_Rm, code, 0);
1407 /* option */
1408 value = extract_field (FLD_option, code, 0);
1409 info->shifter.kind =
1410 aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
1411 /* imm3 */
1412 info->shifter.amount = extract_field (FLD_imm3_10, code, 0);
1413
1414 /* This makes the constraint checking happy. */
1415 info->shifter.operator_present = 1;
1416
1417 /* Assume inst->operands[0].qualifier has been resolved. */
1418 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1419 info->qualifier = AARCH64_OPND_QLF_W;
1420 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1421 && (info->shifter.kind == AARCH64_MOD_UXTX
1422 || info->shifter.kind == AARCH64_MOD_SXTX))
1423 info->qualifier = AARCH64_OPND_QLF_X;
1424
1425 return true;
1426 }
1427
1428 /* Decode the shifted register operand for e.g.
1429 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1430 bool
1431 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1432 aarch64_opnd_info *info,
1433 aarch64_insn code,
1434 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1435 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1436 {
1437 aarch64_insn value;
1438
1439 /* Rm */
1440 info->reg.regno = extract_field (FLD_Rm, code, 0);
1441 /* shift */
1442 value = extract_field (FLD_shift, code, 0);
1443 info->shifter.kind =
1444 aarch64_get_operand_modifier_from_value (value, false /* extend_p */);
1445 if (info->shifter.kind == AARCH64_MOD_ROR
1446 && inst->opcode->iclass != log_shift)
1447 /* ROR is not available for the shifted register operand in arithmetic
1448 instructions. */
1449 return false;
1450 /* imm6 */
1451 info->shifter.amount = extract_field (FLD_imm6_10, code, 0);
1452
1453 /* This makes the constraint checking happy. */
1454 info->shifter.operator_present = 1;
1455
1456 return true;
1457 }
1458
/* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
   where <offset> is given by the OFFSET parameter and where <factor> is
   1 plus SELF's operand-dependent value.  fields[0] specifies the field
   that holds <base>.  */
static bool
aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
				 aarch64_opnd_info *info, aarch64_insn code,
				 int64_t offset)
{
  info->addr.base_regno = extract_field (self->fields[0], code, 0);
  info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
  info->addr.offset.is_reg = false;
  info->addr.writeback = false;
  info->addr.preind = true;
  /* The MUL VL modifier is only printed for a non-zero offset.  */
  if (offset != 0)
    info->shifter.kind = AARCH64_MOD_MUL_VL;
  /* NOTE: amount is set unconditionally, but amount_present below keeps it
     from being printed; shifter.kind is left alone when offset == 0.  */
  info->shifter.amount = 1;
  info->shifter.operator_present = (info->addr.offset.imm != 0);
  info->shifter.amount_present = false;
  return true;
}
1480
1481 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1482 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1483 SELF's operand-dependent value. fields[0] specifies the field that
1484 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1485 bool
1486 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1487 aarch64_opnd_info *info, aarch64_insn code,
1488 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1489 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1490 {
1491 int offset;
1492
1493 offset = extract_field (FLD_SVE_imm4, code, 0);
1494 offset = ((offset + 8) & 15) - 8;
1495 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1496 }
1497
1498 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1499 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1500 SELF's operand-dependent value. fields[0] specifies the field that
1501 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1502 bool
1503 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1504 aarch64_opnd_info *info, aarch64_insn code,
1505 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1506 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1507 {
1508 int offset;
1509
1510 offset = extract_field (FLD_SVE_imm6, code, 0);
1511 offset = (((offset + 32) & 63) - 32);
1512 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1513 }
1514
1515 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1516 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1517 SELF's operand-dependent value. fields[0] specifies the field that
1518 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1519 and imm3 fields, with imm3 being the less-significant part. */
1520 bool
1521 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1522 aarch64_opnd_info *info,
1523 aarch64_insn code,
1524 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1525 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1526 {
1527 int offset;
1528
1529 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3_10);
1530 offset = (((offset + 256) & 511) - 256);
1531 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1532 }
1533
/* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
   is given by the OFFSET parameter and where <shift> is SELF's operand-
   dependent value.  fields[0] specifies the base register field <base>.  */
static bool
aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
			      aarch64_opnd_info *info, aarch64_insn code,
			      int64_t offset)
{
  info->addr.base_regno = extract_field (self->fields[0], code, 0);
  /* Scale the raw offset by 2^(operand-specific shift).  */
  info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
  info->addr.offset.is_reg = false;
  info->addr.writeback = false;
  info->addr.preind = true;
  /* No shifter is ever printed for this addressing form.  */
  info->shifter.operator_present = false;
  info->shifter.amount_present = false;
  return true;
}
1551
1552 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1553 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1554 value. fields[0] specifies the base register field. */
1555 bool
1556 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1557 aarch64_opnd_info *info, aarch64_insn code,
1558 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1559 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1560 {
1561 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1562 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1563 }
1564
1565 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1566 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1567 value. fields[0] specifies the base register field. */
1568 bool
1569 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1570 aarch64_opnd_info *info, aarch64_insn code,
1571 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1572 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1573 {
1574 int offset = extract_field (FLD_SVE_imm6, code, 0);
1575 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1576 }
1577
1578 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1579 is SELF's operand-dependent value. fields[0] specifies the base
1580 register field and fields[1] specifies the offset register field. */
1581 bool
1582 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1583 aarch64_opnd_info *info, aarch64_insn code,
1584 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1585 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1586 {
1587 int index_regno;
1588
1589 index_regno = extract_field (self->fields[1], code, 0);
1590 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1591 return false;
1592
1593 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1594 info->addr.offset.regno = index_regno;
1595 info->addr.offset.is_reg = true;
1596 info->addr.writeback = false;
1597 info->addr.preind = true;
1598 info->shifter.kind = AARCH64_MOD_LSL;
1599 info->shifter.amount = get_operand_specific_data (self);
1600 info->shifter.operator_present = (info->shifter.amount != 0);
1601 info->shifter.amount_present = (info->shifter.amount != 0);
1602 return true;
1603 }
1604
1605 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1606 <shift> is SELF's operand-dependent value. fields[0] specifies the
1607 base register field, fields[1] specifies the offset register field and
1608 fields[2] is a single-bit field that selects SXTW over UXTW. */
1609 bool
1610 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1611 aarch64_opnd_info *info, aarch64_insn code,
1612 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1613 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1614 {
1615 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1616 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1617 info->addr.offset.is_reg = true;
1618 info->addr.writeback = false;
1619 info->addr.preind = true;
1620 if (extract_field (self->fields[2], code, 0))
1621 info->shifter.kind = AARCH64_MOD_SXTW;
1622 else
1623 info->shifter.kind = AARCH64_MOD_UXTW;
1624 info->shifter.amount = get_operand_specific_data (self);
1625 info->shifter.operator_present = true;
1626 info->shifter.amount_present = (info->shifter.amount != 0);
1627 return true;
1628 }
1629
1630 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1631 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1632 fields[0] specifies the base register field. */
1633 bool
1634 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1635 aarch64_opnd_info *info, aarch64_insn code,
1636 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1637 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1638 {
1639 int offset = extract_field (FLD_imm5, code, 0);
1640 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1641 }
1642
/* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
   number.  fields[0] specifies the base register field and fields[1]
   specifies the offset register field.  */
static bool
aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
			 aarch64_insn code, enum aarch64_modifier_kind kind)
{
  info->addr.base_regno = extract_field (self->fields[0], code, 0);
  info->addr.offset.regno = extract_field (self->fields[1], code, 0);
  info->addr.offset.is_reg = true;
  info->addr.writeback = false;
  info->addr.preind = true;
  info->shifter.kind = kind;
  info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
  /* LSL #0 is the default and is not printed; any other modifier, or a
     non-zero amount, is.  */
  info->shifter.operator_present = (kind != AARCH64_MOD_LSL
				    || info->shifter.amount != 0);
  info->shifter.amount_present = (info->shifter.amount != 0);
  return true;
}
1663
1664 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1665 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1666 field and fields[1] specifies the offset register field. */
1667 bool
1668 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1669 aarch64_opnd_info *info, aarch64_insn code,
1670 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1671 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1672 {
1673 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1674 }
1675
1676 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1677 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1678 field and fields[1] specifies the offset register field. */
1679 bool
1680 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1681 aarch64_opnd_info *info, aarch64_insn code,
1682 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1683 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1684 {
1685 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1686 }
1687
1688 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1689 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1690 field and fields[1] specifies the offset register field. */
1691 bool
1692 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1693 aarch64_opnd_info *info, aarch64_insn code,
1694 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1695 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1696 {
1697 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1698 }
1699
/* Finish decoding an SVE arithmetic immediate, given that INFO already
   has the raw field value and that the low 8 bits decode to VALUE.
   Bit 8 of the raw value selects an LSL #8 on the low byte.  */
static bool
decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
{
  info->shifter.kind = AARCH64_MOD_LSL;
  info->shifter.amount = 0;
  /* Bit 8 set: the operand is the low byte shifted left by 8.  */
  if (info->imm.value & 0x100)
    {
      if (value == 0)
	/* Decode 0x100 as #0, LSL #8.  A zero byte cannot carry the shift
	   information itself, so the shifter must be printed explicitly.  */
	info->shifter.amount = 8;
      else
	/* Fold the shift into the immediate for a non-zero byte.  */
	value *= 256;
    }
  info->shifter.operator_present = (info->shifter.amount != 0);
  info->shifter.amount_present = (info->shifter.amount != 0);
  info->imm.value = value;
  return true;
}
1720
1721 /* Decode an SVE ADD/SUB immediate. */
1722 bool
1723 aarch64_ext_sve_aimm (const aarch64_operand *self,
1724 aarch64_opnd_info *info, const aarch64_insn code,
1725 const aarch64_inst *inst,
1726 aarch64_operand_error *errors)
1727 {
1728 return (aarch64_ext_imm (self, info, code, inst, errors)
1729 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1730 }
1731
1732 bool
1733 aarch64_ext_sve_aligned_reglist (const aarch64_operand *self,
1734 aarch64_opnd_info *info, aarch64_insn code,
1735 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1736 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1737 {
1738 unsigned int num_regs = get_operand_specific_data (self);
1739 unsigned int val = extract_field (self->fields[0], code, 0);
1740 info->reglist.first_regno = val * num_regs;
1741 info->reglist.num_regs = num_regs;
1742 info->reglist.stride = 1;
1743 return true;
1744 }
1745
1746 /* Decode an SVE CPY/DUP immediate. */
1747 bool
1748 aarch64_ext_sve_asimm (const aarch64_operand *self,
1749 aarch64_opnd_info *info, const aarch64_insn code,
1750 const aarch64_inst *inst,
1751 aarch64_operand_error *errors)
1752 {
1753 return (aarch64_ext_imm (self, info, code, inst, errors)
1754 && decode_sve_aimm (info, (int8_t) info->imm.value));
1755 }
1756
1757 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1758 The fields array specifies which field to use. */
1759 bool
1760 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1761 aarch64_opnd_info *info, aarch64_insn code,
1762 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1763 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1764 {
1765 if (extract_field (self->fields[0], code, 0))
1766 info->imm.value = 0x3f800000;
1767 else
1768 info->imm.value = 0x3f000000;
1769 info->imm.is_fp = true;
1770 return true;
1771 }
1772
1773 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1774 The fields array specifies which field to use. */
1775 bool
1776 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1777 aarch64_opnd_info *info, aarch64_insn code,
1778 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1779 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1780 {
1781 if (extract_field (self->fields[0], code, 0))
1782 info->imm.value = 0x40000000;
1783 else
1784 info->imm.value = 0x3f000000;
1785 info->imm.is_fp = true;
1786 return true;
1787 }
1788
1789 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1790 The fields array specifies which field to use. */
1791 bool
1792 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1793 aarch64_opnd_info *info, aarch64_insn code,
1794 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1795 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1796 {
1797 if (extract_field (self->fields[0], code, 0))
1798 info->imm.value = 0x3f800000;
1799 else
1800 info->imm.value = 0x0;
1801 info->imm.is_fp = true;
1802 return true;
1803 }
1804
/* Decode ZA tile vector, vector indicator, vector selector, qualifier and
   immediate on numerous SME instruction fields such as MOVA.

   The wider the element size, the more of the combined immediate encodes
   the tile number and the less encodes the slice index.  Returns false
   for an invalid size/Q combination.  */
bool
aarch64_ext_sme_za_hv_tiles (const aarch64_operand *self,
			     aarch64_opnd_info *info, aarch64_insn code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Raw fields: element size, Q bit, H/V direction, slice-select register
     and the combined tile-number/slice-index immediate.  */
  int fld_size = extract_field (self->fields[0], code, 0);
  int fld_q = extract_field (self->fields[1], code, 0);
  int fld_v = extract_field (self->fields[2], code, 0);
  int fld_rv = extract_field (self->fields[3], code, 0);
  int fld_zan_imm = extract_field (self->fields[4], code, 0);

  /* Deduce qualifier encoded in size and Q fields.  */
  if (fld_size == 0)
    {
      /* Byte elements: one tile (ZA0); the whole immediate is the index.  */
      info->indexed_za.regno = 0;
      info->indexed_za.index.imm = fld_zan_imm;
    }
  else if (fld_size == 1)
    {
      /* Halfword: tile number in the upper bits, index in imm[2:0].  */
      info->indexed_za.regno = fld_zan_imm >> 3;
      info->indexed_za.index.imm = fld_zan_imm & 0x07;
    }
  else if (fld_size == 2)
    {
      /* Word: tile number in the upper bits, index in imm[1:0].  */
      info->indexed_za.regno = fld_zan_imm >> 2;
      info->indexed_za.index.imm = fld_zan_imm & 0x03;
    }
  else if (fld_size == 3 && fld_q == 0)
    {
      /* Doubleword: tile number in the upper bits, index in imm[0].  */
      info->indexed_za.regno = fld_zan_imm >> 1;
      info->indexed_za.index.imm = fld_zan_imm & 0x01;
    }
  else if (fld_size == 3 && fld_q == 1)
    {
      /* Quadword: the immediate is all tile number; the index is 0.  */
      info->indexed_za.regno = fld_zan_imm;
      info->indexed_za.index.imm = 0;
    }
  else
    return false;

  /* The slice-select register is W12-W15; fld_rv is the offset from 12.  */
  info->indexed_za.index.regno = fld_rv + 12;
  info->indexed_za.v = fld_v;

  return true;
}
1853
1854 bool
1855 aarch64_ext_sme_za_hv_tiles_range (const aarch64_operand *self,
1856 aarch64_opnd_info *info, aarch64_insn code,
1857 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1858 aarch64_operand_error *errors
1859 ATTRIBUTE_UNUSED)
1860 {
1861 int ebytes = aarch64_get_qualifier_esize (info->qualifier);
1862 int range_size = get_opcode_dependent_value (inst->opcode);
1863 int fld_v = extract_field (self->fields[0], code, 0);
1864 int fld_rv = extract_field (self->fields[1], code, 0);
1865 int fld_zan_imm = extract_field (self->fields[2], code, 0);
1866 int max_value = 16 / range_size / ebytes;
1867
1868 if (max_value == 0)
1869 max_value = 1;
1870
1871 int regno = fld_zan_imm / max_value;
1872 if (regno >= ebytes)
1873 return false;
1874
1875 info->indexed_za.regno = regno;
1876 info->indexed_za.index.imm = (fld_zan_imm % max_value) * range_size;
1877 info->indexed_za.index.countm1 = range_size - 1;
1878 info->indexed_za.index.regno = fld_rv + 12;
1879 info->indexed_za.v = fld_v;
1880
1881 return true;
1882 }
1883
1884 /* Decode in SME instruction ZERO list of up to eight 64-bit element tile names
1885 separated by commas, encoded in the "imm8" field.
1886
1887 For programmer convenience an assembler must also accept the names of
1888 32-bit, 16-bit and 8-bit element tiles which are converted into the
1889 corresponding set of 64-bit element tiles.
1890 */
1891 bool
1892 aarch64_ext_sme_za_list (const aarch64_operand *self,
1893 aarch64_opnd_info *info, aarch64_insn code,
1894 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1895 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1896 {
1897 int mask = extract_field (self->fields[0], code, 0);
1898 info->imm.value = mask;
1899 return true;
1900 }
1901
/* Decode ZA array vector select register (Rv field), optional vector and
   memory offset (imm4_11 field).

   The select register base depends on the operand type: W12-W15 for the
   4-bit-offset form, W8-W11 otherwise.  The encoded offset is scaled by
   the number of vectors accessed per offset step.  */
bool
aarch64_ext_sme_za_array (const aarch64_operand *self,
			  aarch64_opnd_info *info, aarch64_insn code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int regno = extract_field (self->fields[0], code, 0);
  /* Rv is an offset from W12 or W8 depending on the operand form.  */
  if (info->type == AARCH64_OPND_SME_ZA_array_off4)
    regno += 12;
  else
    regno += 8;
  int imm = extract_field (self->fields[1], code, 0);
  /* Operand-specific data gives the offset scaling; 0 means unscaled.  */
  int num_offsets = get_operand_specific_data (self);
  if (num_offsets == 0)
    num_offsets = 1;
  info->indexed_za.index.regno = regno;
  info->indexed_za.index.imm = imm * num_offsets;
  info->indexed_za.index.countm1 = num_offsets - 1;
  info->indexed_za.group_size = get_opcode_dependent_value (inst->opcode);
  return true;
}
1926
1927 bool
1928 aarch64_ext_sme_addr_ri_u4xvl (const aarch64_operand *self,
1929 aarch64_opnd_info *info, aarch64_insn code,
1930 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1931 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1932 {
1933 int regno = extract_field (self->fields[0], code, 0);
1934 int imm = extract_field (self->fields[1], code, 0);
1935 info->addr.base_regno = regno;
1936 info->addr.offset.imm = imm;
1937 /* MUL VL operator is always present for this operand. */
1938 info->shifter.kind = AARCH64_MOD_MUL_VL;
1939 info->shifter.operator_present = (imm != 0);
1940 return true;
1941 }
1942
1943 /* Decode {SM|ZA} filed for SMSTART and SMSTOP instructions. */
1944 bool
1945 aarch64_ext_sme_sm_za (const aarch64_operand *self,
1946 aarch64_opnd_info *info, aarch64_insn code,
1947 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1948 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1949 {
1950 info->pstatefield = 0x1b;
1951 aarch64_insn fld_crm = extract_field (self->fields[0], code, 0);
1952 fld_crm >>= 1; /* CRm[3:1]. */
1953
1954 if (fld_crm == 0x1)
1955 info->reg.regno = 's';
1956 else if (fld_crm == 0x2)
1957 info->reg.regno = 'z';
1958 else
1959 return false;
1960
1961 return true;
1962 }
1963
1964 bool
1965 aarch64_ext_sme_pred_reg_with_index (const aarch64_operand *self,
1966 aarch64_opnd_info *info, aarch64_insn code,
1967 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1968 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1969 {
1970 aarch64_insn fld_rm = extract_field (self->fields[0], code, 0);
1971 aarch64_insn fld_pn = extract_field (self->fields[1], code, 0);
1972 aarch64_insn fld_i1 = extract_field (self->fields[2], code, 0);
1973 aarch64_insn fld_tszh = extract_field (self->fields[3], code, 0);
1974 aarch64_insn fld_tszl = extract_field (self->fields[4], code, 0);
1975 int imm;
1976
1977 info->indexed_za.regno = fld_pn;
1978 info->indexed_za.index.regno = fld_rm + 12;
1979
1980 if (fld_tszl & 0x1)
1981 imm = (fld_i1 << 3) | (fld_tszh << 2) | (fld_tszl >> 1);
1982 else if (fld_tszl & 0x2)
1983 imm = (fld_i1 << 2) | (fld_tszh << 1) | (fld_tszl >> 2);
1984 else if (fld_tszl & 0x4)
1985 imm = (fld_i1 << 1) | fld_tszh;
1986 else if (fld_tszh)
1987 imm = fld_i1;
1988 else
1989 return false;
1990
1991 info->indexed_za.index.imm = imm;
1992 return true;
1993 }
1994
1995 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1996 array specifies which field to use for Zn. MM is encoded in the
1997 concatenation of imm5 and SVE_tszh, with imm5 being the less
1998 significant part. */
1999 bool
2000 aarch64_ext_sve_index (const aarch64_operand *self,
2001 aarch64_opnd_info *info, aarch64_insn code,
2002 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2003 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2004 {
2005 int val;
2006
2007 info->reglane.regno = extract_field (self->fields[0], code, 0);
2008 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
2009 if ((val & 31) == 0)
2010 return 0;
2011 while ((val & 1) == 0)
2012 val /= 2;
2013 info->reglane.index = val / 2;
2014 return true;
2015 }
2016
2017 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
2018 bool
2019 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
2020 aarch64_opnd_info *info, const aarch64_insn code,
2021 const aarch64_inst *inst,
2022 aarch64_operand_error *errors)
2023 {
2024 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
2025 return (aarch64_ext_limm (self, info, code, inst, errors)
2026 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
2027 }
2028
2029 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
2030 and where MM occupies the most-significant part. The operand-dependent
2031 value specifies the number of bits in Zn. */
2032 bool
2033 aarch64_ext_sve_quad_index (const aarch64_operand *self,
2034 aarch64_opnd_info *info, aarch64_insn code,
2035 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2036 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2037 {
2038 unsigned int reg_bits = get_operand_specific_data (self);
2039 unsigned int val = extract_all_fields (self, code);
2040 info->reglane.regno = val & ((1 << reg_bits) - 1);
2041 info->reglane.index = val >> reg_bits;
2042 return true;
2043 }
2044
2045 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
2046 to use for Zn. The opcode-dependent value specifies the number
2047 of registers in the list. */
2048 bool
2049 aarch64_ext_sve_reglist (const aarch64_operand *self,
2050 aarch64_opnd_info *info, aarch64_insn code,
2051 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2052 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2053 {
2054 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
2055 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
2056 info->reglist.stride = 1;
2057 return true;
2058 }
2059
2060 /* Decode a strided register list. The first field holds the top bit
2061 (0 or 16) and the second field holds the lower bits. The stride is
2062 16 divided by the list length. */
2063 bool
2064 aarch64_ext_sve_strided_reglist (const aarch64_operand *self,
2065 aarch64_opnd_info *info, aarch64_insn code,
2066 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2067 aarch64_operand_error *errors
2068 ATTRIBUTE_UNUSED)
2069 {
2070 unsigned int upper = extract_field (self->fields[0], code, 0);
2071 unsigned int lower = extract_field (self->fields[1], code, 0);
2072 info->reglist.first_regno = upper * 16 + lower;
2073 info->reglist.num_regs = get_operand_specific_data (self);
2074 info->reglist.stride = 16 / info->reglist.num_regs;
2075 return true;
2076 }
2077
2078 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
2079 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
2080 field. */
2081 bool
2082 aarch64_ext_sve_scale (const aarch64_operand *self,
2083 aarch64_opnd_info *info, aarch64_insn code,
2084 const aarch64_inst *inst, aarch64_operand_error *errors)
2085 {
2086 int val;
2087
2088 if (!aarch64_ext_imm (self, info, code, inst, errors))
2089 return false;
2090 val = extract_field (FLD_SVE_imm4, code, 0);
2091 info->shifter.kind = AARCH64_MOD_MUL;
2092 info->shifter.amount = val + 1;
2093 info->shifter.operator_present = (val != 0);
2094 info->shifter.amount_present = (val != 0);
2095 return true;
2096 }
2097
/* Return the top set bit in VALUE, which is expected to be relatively
   small.  Returns zero when VALUE is zero.  */
static uint64_t
get_top_bit (uint64_t value)
{
  uint64_t top = 0;

  /* Peel set bits off from the bottom; the last one peeled is the top.  */
  while (value != 0)
    {
      top = value & -value;	/* Isolate the lowest set bit.  */
      value &= value - 1;	/* Clear it.  */
    }
  return top;
}
2107
2108 /* Decode an SVE shift-left immediate. */
2109 bool
2110 aarch64_ext_sve_shlimm (const aarch64_operand *self,
2111 aarch64_opnd_info *info, const aarch64_insn code,
2112 const aarch64_inst *inst, aarch64_operand_error *errors)
2113 {
2114 if (!aarch64_ext_imm (self, info, code, inst, errors)
2115 || info->imm.value == 0)
2116 return false;
2117
2118 info->imm.value -= get_top_bit (info->imm.value);
2119 return true;
2120 }
2121
2122 /* Decode an SVE shift-right immediate. */
2123 bool
2124 aarch64_ext_sve_shrimm (const aarch64_operand *self,
2125 aarch64_opnd_info *info, const aarch64_insn code,
2126 const aarch64_inst *inst, aarch64_operand_error *errors)
2127 {
2128 if (!aarch64_ext_imm (self, info, code, inst, errors)
2129 || info->imm.value == 0)
2130 return false;
2131
2132 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
2133 return true;
2134 }
2135
2136 /* Decode X0-X30. Register 31 is unallocated. */
2137 bool
2138 aarch64_ext_x0_to_x30 (const aarch64_operand *self, aarch64_opnd_info *info,
2139 const aarch64_insn code,
2140 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2141 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2142 {
2143 info->reg.regno = extract_field (self->fields[0], code, 0);
2144 return info->reg.regno <= 30;
2145 }
2146
2147 /* Decode an indexed register, with the first field being the register
2148 number and the remaining fields being the index. */
2149 bool
2150 aarch64_ext_simple_index (const aarch64_operand *self, aarch64_opnd_info *info,
2151 const aarch64_insn code,
2152 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2153 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2154 {
2155 int bias = get_operand_specific_data (self);
2156 info->reglane.regno = extract_field (self->fields[0], code, 0) + bias;
2157 info->reglane.index = extract_all_fields_after (self, 1, code);
2158 return true;
2159 }
2160
2161 /* Decode a plain shift-right immediate, when there is only a single
2162 element size. */
2163 bool
2164 aarch64_ext_plain_shrimm (const aarch64_operand *self, aarch64_opnd_info *info,
2165 const aarch64_insn code,
2166 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2167 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2168 {
2169 unsigned int base = 1 << get_operand_field_width (self, 0);
2170 info->imm.value = base - extract_field (self->fields[0], code, 0);
2171 return true;
2172 }
2173 \f
2174 /* Bitfields that are commonly used to encode certain operands' information
2175 may be partially used as part of the base opcode in some instructions.
2176 For example, the bit 1 of the field 'size' in
2177 FCVTXN <Vb><d>, <Va><n>
2178 is actually part of the base opcode, while only size<0> is available
2179 for encoding the register type. Another example is the AdvSIMD
2180 instruction ORR (register), in which the field 'size' is also used for
2181 the base opcode, leaving only the field 'Q' available to encode the
2182 vector register arrangement specifier '8B' or '16B'.
2183
2184 This function tries to deduce the qualifier from the value of partially
2185 constrained field(s). Given the VALUE of such a field or fields, the
2186 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
2187 operand encoding), the function returns the matching qualifier or
2188 AARCH64_OPND_QLF_NIL if nothing matches.
2189
2190 N.B. CANDIDATES is a group of possible qualifiers that are valid for
2191 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
2192 may end with AARCH64_OPND_QLF_NIL. */
2193
2194 static enum aarch64_opnd_qualifier
2195 get_qualifier_from_partial_encoding (aarch64_insn value,
2196 const enum aarch64_opnd_qualifier* \
2197 candidates,
2198 aarch64_insn mask)
2199 {
2200 int i;
2201 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
2202 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2203 {
2204 aarch64_insn standard_value;
2205 if (candidates[i] == AARCH64_OPND_QLF_NIL)
2206 break;
2207 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
2208 if ((standard_value & mask) == (value & mask))
2209 return candidates[i];
2210 }
2211 return AARCH64_OPND_QLF_NIL;
2212 }
2213
2214 /* Given a list of qualifier sequences, return all possible valid qualifiers
2215 for operand IDX in QUALIFIERS.
2216 Assume QUALIFIERS is an array whose length is large enough. */
2217
2218 static void
2219 get_operand_possible_qualifiers (int idx,
2220 const aarch64_opnd_qualifier_seq_t *list,
2221 enum aarch64_opnd_qualifier *qualifiers)
2222 {
2223 int i;
2224 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2225 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
2226 break;
2227 }
2228
2229 /* Decode the size Q field for e.g. SHADD.
2230 We tag one operand with the qualifer according to the code;
2231 whether the qualifier is valid for this opcode or not, it is the
2232 duty of the semantic checking. */
2233
2234 static int
2235 decode_sizeq (aarch64_inst *inst)
2236 {
2237 int idx;
2238 enum aarch64_opnd_qualifier qualifier;
2239 aarch64_insn code;
2240 aarch64_insn value, mask;
2241 enum aarch64_field_kind fld_sz;
2242 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2243
2244 if (inst->opcode->iclass == asisdlse
2245 || inst->opcode->iclass == asisdlsep
2246 || inst->opcode->iclass == asisdlso
2247 || inst->opcode->iclass == asisdlsop)
2248 fld_sz = FLD_vldst_size;
2249 else
2250 fld_sz = FLD_size;
2251
2252 code = inst->value;
2253 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
2254 /* Obtain the info that which bits of fields Q and size are actually
2255 available for operand encoding. Opcodes like FMAXNM and FMLA have
2256 size[1] unavailable. */
2257 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
2258
2259 /* The index of the operand we are going to tag a qualifier and the qualifer
2260 itself are reasoned from the value of the size and Q fields and the
2261 possible valid qualifier lists. */
2262 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
2263 DEBUG_TRACE ("key idx: %d", idx);
2264
2265 /* For most related instruciton, size:Q are fully available for operand
2266 encoding. */
2267 if (mask == 0x7)
2268 {
2269 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
2270 return 1;
2271 }
2272
2273 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2274 candidates);
2275 #ifdef DEBUG_AARCH64
2276 if (debug_dump)
2277 {
2278 int i;
2279 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
2280 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2281 DEBUG_TRACE ("qualifier %d: %s", i,
2282 aarch64_get_qualifier_name(candidates[i]));
2283 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
2284 }
2285 #endif /* DEBUG_AARCH64 */
2286
2287 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
2288
2289 if (qualifier == AARCH64_OPND_QLF_NIL)
2290 return 0;
2291
2292 inst->operands[idx].qualifier = qualifier;
2293 return 1;
2294 }
2295
2296 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
2297 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
2298
2299 static int
2300 decode_asimd_fcvt (aarch64_inst *inst)
2301 {
2302 aarch64_field field = {0, 0};
2303 aarch64_insn value;
2304 enum aarch64_opnd_qualifier qualifier;
2305
2306 gen_sub_field (FLD_size, 0, 1, &field);
2307 value = extract_field_2 (&field, inst->value, 0);
2308 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
2309 : AARCH64_OPND_QLF_V_2D;
2310 switch (inst->opcode->op)
2311 {
2312 case OP_FCVTN:
2313 case OP_FCVTN2:
2314 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
2315 inst->operands[1].qualifier = qualifier;
2316 break;
2317 case OP_FCVTL:
2318 case OP_FCVTL2:
2319 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
2320 inst->operands[0].qualifier = qualifier;
2321 break;
2322 default:
2323 return 0;
2324 }
2325
2326 return 1;
2327 }
2328
2329 /* Decode size[0], i.e. bit 22, for
2330 e.g. FCVTXN <Vb><d>, <Va><n>. */
2331
2332 static int
2333 decode_asisd_fcvtxn (aarch64_inst *inst)
2334 {
2335 aarch64_field field = {0, 0};
2336 gen_sub_field (FLD_size, 0, 1, &field);
2337 if (!extract_field_2 (&field, inst->value, 0))
2338 return 0;
2339 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2340 return 1;
2341 }
2342
2343 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
2344 static int
2345 decode_fcvt (aarch64_inst *inst)
2346 {
2347 enum aarch64_opnd_qualifier qualifier;
2348 aarch64_insn value;
2349 const aarch64_field field = {15, 2};
2350
2351 /* opc dstsize */
2352 value = extract_field_2 (&field, inst->value, 0);
2353 switch (value)
2354 {
2355 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2356 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2357 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2358 default: return 0;
2359 }
2360 inst->operands[0].qualifier = qualifier;
2361
2362 return 1;
2363 }
2364
/* Do miscellaneous decodings that are not common enough to be driven by
   flags.  Mostly this validates whether an SVE/AdvSIMD instruction may
   be printed using an alias mnemonic; returns nonzero when it may.  */

static int
do_misc_decoding (aarch64_inst *inst)
{
  unsigned int value;
  switch (inst->opcode->op)
    {
    case OP_FCVT:
      return decode_fcvt (inst);

    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      return decode_asimd_fcvt (inst);

    case OP_FCVTXN_S:
      return decode_asisd_fcvtxn (inst);

    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Alias is valid only when Pn, Pm and the governing predicate
	 all name the same register.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
	      && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));

    case OP_MOV_Z_P_Z:
      /* Alias is valid only when Zd and Zm name the same register.  */
      return (extract_field (FLD_SVE_Zd, inst->value, 0)
	      == extract_field (FLD_SVE_Zm_16, inst->value, 0));

    case OP_MOV_Z_V:
      /* Index must be zero, i.e. tszh:imm5 contains only the single
	 element-size marker bit (a power of two no greater than 16).  */
      value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
      return value > 0 && value <= 16 && value == (value & -value);

    case OP_MOV_Z_Z:
      /* Alias is valid only when Zn and Zm name the same register.  */
      return (extract_field (FLD_SVE_Zn, inst->value, 0)
	      == extract_field (FLD_SVE_Zm_16, inst->value, 0));

    case OP_MOV_Z_Zi:
      /* Index must be nonzero: at least one bit set above the
	 element-size marker bit.  */
      value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
      return value > 0 && value != (value & -value);

    case OP_MOVM_P_P_P:
      /* Alias is valid only when Pd and Pm name the same register.  */
      return (extract_field (FLD_SVE_Pd, inst->value, 0)
	      == extract_field (FLD_SVE_Pm, inst->value, 0));

    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Alias is valid only when Pn and Pm name the same register.  */
      return (extract_field (FLD_SVE_Pn, inst->value, 0)
	      == extract_field (FLD_SVE_Pm, inst->value, 0));

    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Alias is valid only when Pm matches the governing predicate.  */
      return (extract_field (FLD_SVE_Pm, inst->value, 0)
	      == extract_field (FLD_SVE_Pg4_10, inst->value, 0));

    default:
      return 0;
    }
}
2428
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags, decode the related
   field(s) and store the information in one of the related operands.  The
   'one' operand is not any operand but one of the operands that can
   accommodate all the information that has been decoded.  Returns nonzero
   on success, zero when the encoding is invalid.  */

static int
do_special_decoding (aarch64_inst *inst)
{
  int idx;
  aarch64_insn value;
  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      value = extract_field (FLD_cond2, inst->value, 0);
      inst->cond = get_cond_from_value (value);
    }
  /* 'sf' field.  */
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = extract_field (FLD_sf, inst->value, 0);
      inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
      /* F_N opcodes additionally require the N bit to agree with 'sf'.  */
      if ((inst->opcode->flags & F_N)
	  && extract_field (FLD_N, inst->value, 0) != value)
	return 0;
    }
  /* 'lse_sz' field; decoded the same way as 'sf'.  */
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = extract_field (FLD_lse_sz, inst->value, 0);
      inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
    }
  /* size:Q fields.  */
  if (inst->opcode->flags & F_SIZEQ)
    return decode_sizeq (inst);

  /* 'type' field for floating-point instructions.  */
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      value = extract_field (FLD_type, inst->value, 0);
      switch (value)
	{
	case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
	case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
	case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
	default: return 0;	/* 2 is unallocated.  */
	}
    }

  /* Scalar 'size' field.  */
  if (inst->opcode->flags & F_SSIZE)
    {
      /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
	 of the base opcode.  */
      aarch64_insn mask;
      enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      value = extract_field (FLD_size, inst->value, inst->opcode->mask);
      mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
      /* For most related instructions, the 'size' field is fully available
	 for operand encoding.  */
      if (mask == 0x3)
	inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
      else
	{
	  /* Otherwise deduce the qualifier from the partially constrained
	     bits and the operand's valid qualifier list.  */
	  get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
					   candidates);
	  inst->operands[idx].qualifier
	    = get_qualifier_from_partial_encoding (value, candidates, mask);
	}
    }

  /* Vector arrangement <T> encoded in imm5 and Q.  */
  if (inst->opcode->flags & F_T)
    {
      /* Num of consecutive '0's on the right side of imm5<3:0>.  */
      int num = 0;
      unsigned val, Q;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      val = extract_field (FLD_imm5, inst->value, 0);
      while ((val & 0x1) == 0 && ++num <= 3)
	val >>= 1;
      if (num > 3)
	return 0;
      Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
      inst->operands[0].qualifier =
	get_vreg_qualifier_from_value ((num << 1) | Q);
    }

  /* General-purpose register size encoded in Q.  */
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	{
	  /* Otherwise use the result operand, which has to be a integer
	     register.  */
	  assert (aarch64_get_operand_class (inst->opcode->operands[0])
		  == AARCH64_OPND_CLASS_INT_REG);
	  idx = 0;
	}
      assert (idx == 0 || idx == 1);
      value = extract_field (FLD_Q, inst->value, 0);
      inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
    }

  /* Destination size of a load-signed, encoded in opc<0>.  */
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      value = extract_field_2 (&field, inst->value, 0);
      inst->operands[0].qualifier
	= value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
    }

  /* Miscellaneous decoding; done as the last step.  */
  if (inst->opcode->flags & F_MISC)
    return do_misc_decoding (inst);

  return 1;
}
2564
2565 /* Converters converting a real opcode instruction to its alias form. */
2566
2567 /* ROR <Wd>, <Ws>, #<shift>
2568 is equivalent to:
2569 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2570 static int
2571 convert_extr_to_ror (aarch64_inst *inst)
2572 {
2573 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2574 {
2575 copy_operand_info (inst, 2, 3);
2576 inst->operands[3].type = AARCH64_OPND_NIL;
2577 return 1;
2578 }
2579 return 0;
2580 }
2581
2582 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2583 is equivalent to:
2584 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2585 static int
2586 convert_shll_to_xtl (aarch64_inst *inst)
2587 {
2588 if (inst->operands[2].imm.value == 0)
2589 {
2590 inst->operands[2].type = AARCH64_OPND_NIL;
2591 return 1;
2592 }
2593 return 0;
2594 }
2595
2596 /* Convert
2597 UBFM <Xd>, <Xn>, #<shift>, #63.
2598 to
2599 LSR <Xd>, <Xn>, #<shift>. */
2600 static int
2601 convert_bfm_to_sr (aarch64_inst *inst)
2602 {
2603 int64_t imms, val;
2604
2605 imms = inst->operands[3].imm.value;
2606 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2607 if (imms == val)
2608 {
2609 inst->operands[3].type = AARCH64_OPND_NIL;
2610 return 1;
2611 }
2612
2613 return 0;
2614 }
2615
2616 /* Convert MOV to ORR. */
2617 static int
2618 convert_orr_to_mov (aarch64_inst *inst)
2619 {
2620 /* MOV <Vd>.<T>, <Vn>.<T>
2621 is equivalent to:
2622 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2623 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2624 {
2625 inst->operands[2].type = AARCH64_OPND_NIL;
2626 return 1;
2627 }
2628 return 0;
2629 }
2630
2631 /* When <imms> >= <immr>, the instruction written:
2632 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2633 is equivalent to:
2634 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2635
2636 static int
2637 convert_bfm_to_bfx (aarch64_inst *inst)
2638 {
2639 int64_t immr, imms;
2640
2641 immr = inst->operands[2].imm.value;
2642 imms = inst->operands[3].imm.value;
2643 if (imms >= immr)
2644 {
2645 int64_t lsb = immr;
2646 inst->operands[2].imm.value = lsb;
2647 inst->operands[3].imm.value = imms + 1 - lsb;
2648 /* The two opcodes have different qualifiers for
2649 the immediate operands; reset to help the checking. */
2650 reset_operand_qualifier (inst, 2);
2651 reset_operand_qualifier (inst, 3);
2652 return 1;
2653 }
2654
2655 return 0;
2656 }
2657
2658 /* When <imms> < <immr>, the instruction written:
2659 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2660 is equivalent to:
2661 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2662
2663 static int
2664 convert_bfm_to_bfi (aarch64_inst *inst)
2665 {
2666 int64_t immr, imms, val;
2667
2668 immr = inst->operands[2].imm.value;
2669 imms = inst->operands[3].imm.value;
2670 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2671 if (imms < immr)
2672 {
2673 inst->operands[2].imm.value = (val - immr) & (val - 1);
2674 inst->operands[3].imm.value = imms + 1;
2675 /* The two opcodes have different qualifiers for
2676 the immediate operands; reset to help the checking. */
2677 reset_operand_qualifier (inst, 2);
2678 reset_operand_qualifier (inst, 3);
2679 return 1;
2680 }
2681
2682 return 0;
2683 }
2684
2685 /* The instruction written:
2686 BFC <Xd>, #<lsb>, #<width>
2687 is equivalent to:
2688 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2689
2690 static int
2691 convert_bfm_to_bfc (aarch64_inst *inst)
2692 {
2693 int64_t immr, imms, val;
2694
2695 /* Should have been assured by the base opcode value. */
2696 assert (inst->operands[1].reg.regno == 0x1f);
2697
2698 immr = inst->operands[2].imm.value;
2699 imms = inst->operands[3].imm.value;
2700 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2701 if (imms < immr)
2702 {
2703 /* Drop XZR from the second operand. */
2704 copy_operand_info (inst, 1, 2);
2705 copy_operand_info (inst, 2, 3);
2706 inst->operands[3].type = AARCH64_OPND_NIL;
2707
2708 /* Recalculate the immediates. */
2709 inst->operands[1].imm.value = (val - immr) & (val - 1);
2710 inst->operands[2].imm.value = imms + 1;
2711
2712 /* The two opcodes have different qualifiers for the operands; reset to
2713 help the checking. */
2714 reset_operand_qualifier (inst, 1);
2715 reset_operand_qualifier (inst, 2);
2716 reset_operand_qualifier (inst, 3);
2717
2718 return 1;
2719 }
2720
2721 return 0;
2722 }
2723
2724 /* The instruction written:
2725 LSL <Xd>, <Xn>, #<shift>
2726 is equivalent to:
2727 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2728
2729 static int
2730 convert_ubfm_to_lsl (aarch64_inst *inst)
2731 {
2732 int64_t immr = inst->operands[2].imm.value;
2733 int64_t imms = inst->operands[3].imm.value;
2734 int64_t val
2735 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2736
2737 if ((immr == 0 && imms == val) || immr == imms + 1)
2738 {
2739 inst->operands[3].type = AARCH64_OPND_NIL;
2740 inst->operands[2].imm.value = val - imms;
2741 return 1;
2742 }
2743
2744 return 0;
2745 }
2746
2747 /* CINC <Wd>, <Wn>, <cond>
2748 is equivalent to:
2749 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2750 where <cond> is not AL or NV. */
2751
2752 static int
2753 convert_from_csel (aarch64_inst *inst)
2754 {
2755 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2756 && (inst->operands[3].cond->value & 0xe) != 0xe)
2757 {
2758 copy_operand_info (inst, 2, 3);
2759 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2760 inst->operands[3].type = AARCH64_OPND_NIL;
2761 return 1;
2762 }
2763 return 0;
2764 }
2765
2766 /* CSET <Wd>, <cond>
2767 is equivalent to:
2768 CSINC <Wd>, WZR, WZR, invert(<cond>)
2769 where <cond> is not AL or NV. */
2770
2771 static int
2772 convert_csinc_to_cset (aarch64_inst *inst)
2773 {
2774 if (inst->operands[1].reg.regno == 0x1f
2775 && inst->operands[2].reg.regno == 0x1f
2776 && (inst->operands[3].cond->value & 0xe) != 0xe)
2777 {
2778 copy_operand_info (inst, 1, 3);
2779 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2780 inst->operands[3].type = AARCH64_OPND_NIL;
2781 inst->operands[2].type = AARCH64_OPND_NIL;
2782 return 1;
2783 }
2784 return 0;
2785 }
2786
2787 /* MOV <Wd>, #<imm>
2788 is equivalent to:
2789 MOVZ <Wd>, #<imm16_5>, LSL #<shift>.
2790
2791 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2792 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2793 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2794 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2795 machine-instruction mnemonic must be used. */
2796
2797 static int
2798 convert_movewide_to_mov (aarch64_inst *inst)
2799 {
2800 uint64_t value = inst->operands[1].imm.value;
2801 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2802 if (value == 0 && inst->operands[1].shifter.amount != 0)
2803 return 0;
2804 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2805 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2806 value <<= inst->operands[1].shifter.amount;
2807 /* As an alias convertor, it has to be clear that the INST->OPCODE
2808 is the opcode of the real instruction. */
2809 if (inst->opcode->op == OP_MOVN)
2810 {
2811 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2812 value = ~value;
2813 /* A MOVN has an immediate that could be encoded by MOVZ. */
2814 if (aarch64_wide_constant_p (value, is32, NULL))
2815 return 0;
2816 }
2817 inst->operands[1].imm.value = value;
2818 inst->operands[1].shifter.amount = 0;
2819 return 1;
2820 }
2821
2822 /* MOV <Wd>, #<imm>
2823 is equivalent to:
2824 ORR <Wd>, WZR, #<imm>.
2825
2826 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2827 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2828 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2829 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2830 machine-instruction mnemonic must be used. */
2831
2832 static int
2833 convert_movebitmask_to_mov (aarch64_inst *inst)
2834 {
2835 int is32;
2836 uint64_t value;
2837
2838 /* Should have been assured by the base opcode value. */
2839 assert (inst->operands[1].reg.regno == 0x1f);
2840 copy_operand_info (inst, 1, 2);
2841 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2842 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2843 value = inst->operands[1].imm.value;
2844 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2845 instruction. */
2846 if (inst->operands[0].reg.regno != 0x1f
2847 && (aarch64_wide_constant_p (value, is32, NULL)
2848 || aarch64_wide_constant_p (~value, is32, NULL)))
2849 return 0;
2850
2851 inst->operands[2].type = AARCH64_OPND_NIL;
2852 return 1;
2853 }
2854
/* Some alias opcodes are disassembled by being converted from their real-form.
   N.B. INST->OPCODE is the real opcode rather than the alias.  Returns
   nonzero when the conversion to ALIAS succeeded.  */

static int
convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
{
  switch (alias->op)
    {
    /* ASR/LSR: UBFM/SBFM with imms fixed at the register width - 1.  */
    case OP_ASR_IMM:
    case OP_LSR_IMM:
      return convert_bfm_to_sr (inst);
    case OP_LSL_IMM:
      return convert_ubfm_to_lsl (inst);
    /* CINC/CINV/CNEG: CSINC/CSINV/CSNEG with both sources equal.  */
    case OP_CINC:
    case OP_CINV:
    case OP_CNEG:
      return convert_from_csel (inst);
    /* CSET/CSETM: both sources are WZR/XZR.  */
    case OP_CSET:
    case OP_CSETM:
      return convert_csinc_to_cset (inst);
    /* (lsb, width) forms of the bitfield-move immediates.  */
    case OP_UBFX:
    case OP_BFXIL:
    case OP_SBFX:
      return convert_bfm_to_bfx (inst);
    case OP_SBFIZ:
    case OP_BFI:
    case OP_UBFIZ:
      return convert_bfm_to_bfi (inst);
    case OP_BFC:
      return convert_bfm_to_bfc (inst);
    /* MOV (vector): ORR with both sources equal.  */
    case OP_MOV_V:
      return convert_orr_to_mov (inst);
    case OP_MOV_IMM_WIDE:
    case OP_MOV_IMM_WIDEN:
      return convert_movewide_to_mov (inst);
    case OP_MOV_IMM_LOG:
      return convert_movebitmask_to_mov (inst);
    /* ROR (immediate): EXTR with both sources equal.  */
    case OP_ROR_IMM:
      return convert_extr_to_ror (inst);
    /* SXTL/UXTL: SSHLL/USHLL with a zero shift.  */
    case OP_SXTL:
    case OP_SXTL2:
    case OP_UXTL:
    case OP_UXTL2:
      return convert_shll_to_xtl (inst);
    default:
      return 0;
    }
}
2903
2904 static bool
2905 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2906 aarch64_inst *, int, aarch64_operand_error *errors);
2907
2908 /* Given the instruction information in *INST, check if the instruction has
2909 any alias form that can be used to represent *INST. If the answer is yes,
2910 update *INST to be in the form of the determined alias. */
2911
2912 /* In the opcode description table, the following flags are used in opcode
2913 entries to help establish the relations between the real and alias opcodes:
2914
2915 F_ALIAS: opcode is an alias
2916 F_HAS_ALIAS: opcode has alias(es)
2917 F_P1
2918 F_P2
2919 F_P3: Disassembly preference priority 1-3 (the larger the
2920 higher). If nothing is specified, it is the priority
2921 0 by default, i.e. the lowest priority.
2922
2923 Although the relation between the machine and the alias instructions are not
2924 explicitly described, it can be easily determined from the base opcode
2925 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2926 description entries:
2927
2928 The mask of an alias opcode must be equal to or a super-set (i.e. more
2929 constrained) of that of the aliased opcode; so is the base opcode value.
2930
2931 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2932 && (opcode->mask & real->mask) == real->mask
2933 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2934 then OPCODE is an alias of, and only of, the REAL instruction
2935
2936 The alias relationship is forced flat-structured to keep related algorithm
2937 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2938
   During the disassembling, the decoding decision tree (in
   opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
   if the decoding of such a machine instruction succeeds (and -Mno-aliases is
   not specified), the disassembler will check whether any alias instruction
   exists for this real instruction.  If there is, the disassembler will try
   to disassemble the 32-bit binary again using the alias's rule, or try to
   convert the IR to the form of the alias.  In the case of multiple aliases,
   the aliases are tried one by one from the highest priority (currently the
   flag F_P3) to the lowest priority (no priority flag), and the first one
   that succeeds is adopted.
2949
   You may ask why there is a need for the conversion of IR from one form to
   another in handling certain aliases.  This is because on the one hand it
   avoids adding more operand code to handle unusual encoding/decoding; on the
   other hand, during the disassembling, the conversion is an effective
   approach to check the condition of an alias (as an alias may be adopted
   only if certain conditions are met).
2956
2957 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2958 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2959 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
2960
2961 static void
2962 determine_disassembling_preference (struct aarch64_inst *inst,
2963 aarch64_operand_error *errors)
2964 {
2965 const aarch64_opcode *opcode;
2966 const aarch64_opcode *alias;
2967
2968 opcode = inst->opcode;
2969
2970 /* This opcode does not have an alias, so use itself. */
2971 if (!opcode_has_alias (opcode))
2972 return;
2973
2974 alias = aarch64_find_alias_opcode (opcode);
2975 assert (alias);
2976
2977 #ifdef DEBUG_AARCH64
2978 if (debug_dump)
2979 {
2980 const aarch64_opcode *tmp = alias;
2981 printf ("#### LIST orderd: ");
2982 while (tmp)
2983 {
2984 printf ("%s, ", tmp->name);
2985 tmp = aarch64_find_next_alias_opcode (tmp);
2986 }
2987 printf ("\n");
2988 }
2989 #endif /* DEBUG_AARCH64 */
2990
2991 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2992 {
2993 DEBUG_TRACE ("try %s", alias->name);
2994 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2995
2996 /* An alias can be a pseudo opcode which will never be used in the
2997 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2998 aliasing AND. */
2999 if (pseudo_opcode_p (alias))
3000 {
3001 DEBUG_TRACE ("skip pseudo %s", alias->name);
3002 continue;
3003 }
3004
3005 if ((inst->value & alias->mask) != alias->opcode)
3006 {
3007 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
3008 continue;
3009 }
3010
3011 if (!AARCH64_CPU_HAS_ALL_FEATURES (arch_variant, *alias->avariant))
3012 {
3013 DEBUG_TRACE ("skip %s: we're missing features", alias->name);
3014 continue;
3015 }
3016
3017 /* No need to do any complicated transformation on operands, if the alias
3018 opcode does not have any operand. */
3019 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
3020 {
3021 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
3022 aarch64_replace_opcode (inst, alias);
3023 return;
3024 }
3025 if (alias->flags & F_CONV)
3026 {
3027 aarch64_inst copy;
3028 memcpy (&copy, inst, sizeof (aarch64_inst));
3029 /* ALIAS is the preference as long as the instruction can be
3030 successfully converted to the form of ALIAS. */
3031 if (convert_to_alias (&copy, alias) == 1)
3032 {
3033 aarch64_replace_opcode (&copy, alias);
3034 if (aarch64_match_operands_constraint (&copy, NULL) != 1)
3035 {
3036 DEBUG_TRACE ("FAILED with alias %s ", alias->name);
3037 }
3038 else
3039 {
3040 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
3041 memcpy (inst, &copy, sizeof (aarch64_inst));
3042 }
3043 return;
3044 }
3045 }
3046 else
3047 {
3048 /* Directly decode the alias opcode. */
3049 aarch64_inst temp;
3050 memset (&temp, '\0', sizeof (aarch64_inst));
3051 if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
3052 {
3053 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
3054 memcpy (inst, &temp, sizeof (aarch64_inst));
3055 return;
3056 }
3057 }
3058 }
3059 }
3060
3061 /* Some instructions (including all SVE ones) use the instruction class
3062 to describe how a qualifiers_list index is represented in the instruction
3063 encoding. If INST is such an instruction, decode the appropriate fields
3064 and fill in the operand qualifiers accordingly. Return true if no
3065 problems are found. */
3066
static bool
aarch64_decode_variant_using_iclass (aarch64_inst *inst)
{
  int i, variant;

  /* VARIANT selects a row of inst->opcode->qualifiers_list below.  */
  variant = 0;
  switch (inst->opcode->iclass)
    {
    case sme_mov:
      variant = extract_fields (inst->value, 0, 2, FLD_SME_Q, FLD_SME_size_22);
      /* Values 4..6 are invalid encodings; 7 maps onto the fifth row.  */
      if (variant >= 4 && variant < 7)
	return false;
      if (variant == 7)
	variant = 4;
      break;

    case sme_psel:
      i = extract_fields (inst->value, 0, 2, FLD_SME_tszh, FLD_SME_tszl);
      if (i == 0)
	return false;
      /* Count trailing zero bits: the position of the lowest set bit
	 selects the variant.  */
      while ((i & 1) == 0)
	{
	  i >>= 1;
	  variant += 1;
	}
      break;

    case sme_shift:
      i = extract_field (FLD_SVE_tszh, inst->value, 0);
      goto sve_shift;

    case sme_size_12_bhs:
      variant = extract_field (FLD_SME_size_12, inst->value, 0);
      if (variant >= 3)
	return false;
      break;

    case sme_size_12_hs:
      variant = extract_field (FLD_SME_size_12, inst->value, 0);
      if (variant != 1 && variant != 2)
	return false;
      variant -= 1;
      break;

    case sme_size_22:
      variant = extract_field (FLD_SME_size_22, inst->value, 0);
      break;

    case sme_size_22_hsd:
      variant = extract_field (FLD_SME_size_22, inst->value, 0);
      if (variant < 1)
	return false;
      variant -= 1;
      break;

    case sme_sz_23:
      variant = extract_field (FLD_SME_sz_23, inst->value, 0);
      break;

    case sve_cpy:
      variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
      break;

    case sve_index:
      i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
      if ((i & 31) == 0)
	return false;
      /* As for sme_psel: position of the lowest set bit.  */
      while ((i & 1) == 0)
	{
	  i >>= 1;
	  variant += 1;
	}
      break;

    case sve_limm:
      /* Pick the smallest applicable element size.  */
      if ((inst->value & 0x20600) == 0x600)
	variant = 0;
      else if ((inst->value & 0x20400) == 0x400)
	variant = 1;
      else if ((inst->value & 0x20000) == 0)
	variant = 2;
      else
	variant = 3;
      break;

    case sme2_mov:
      /* .D is preferred over the other sizes in disassembly.  */
      variant = 3;
      break;

    case sme_misc:
    case sve_misc:
      /* These instructions have only a single variant.  */
      break;

    case sve_movprfx:
      variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
      break;

    case sve_pred_zm:
      variant = extract_field (FLD_SVE_M_4, inst->value, 0);
      break;

    case sve_shift_pred:
      i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
    sve_shift:
      /* Shared tail for the shift iclasses above that jump here: the
	 position of the highest set bit selects the variant.  */
      if (i == 0)
	return false;
      while (i != 1)
	{
	  i >>= 1;
	  variant += 1;
	}
      break;

    case sve_shift_unpred:
      i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
      goto sve_shift;

    case sve_size_bhs:
      variant = extract_field (FLD_size, inst->value, 0);
      if (variant >= 3)
	return false;
      break;

    case sve_size_bhsd:
      variant = extract_field (FLD_size, inst->value, 0);
      break;

    case sve_size_hsd:
      i = extract_field (FLD_size, inst->value, 0);
      if (i < 1)
	return false;
      variant = i - 1;
      break;

    case sme_fp_sd:
    case sme_int_sd:
    case sve_size_bh:
    case sve_size_sd:
      variant = extract_field (FLD_SVE_sz, inst->value, 0);
      break;

    case sve_size_sd2:
      variant = extract_field (FLD_SVE_sz2, inst->value, 0);
      break;

    case sve_size_hsd2:
      i = extract_field (FLD_SVE_size, inst->value, 0);
      if (i < 1)
	return false;
      variant = i - 1;
      break;

    case sve_size_13:
      /* Ignore low bit of this field since that is set in the opcode for
	 instructions of this iclass.  */
      i = (extract_field (FLD_size, inst->value, 0) & 2);
      variant = (i >> 1);
      break;

    case sve_shift_tsz_bhsd:
      i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
      if (i == 0)
	return false;
      while (i != 1)
	{
	  i >>= 1;
	  variant += 1;
	}
      break;

    case sve_size_tsz_bhs:
      i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
      if (i == 0)
	return false;
      /* Here only an exact power of two is valid: any stray low set bit
	 below the top one rejects the encoding.  */
      while (i != 1)
	{
	  if (i & 1)
	    return false;
	  i >>= 1;
	  variant += 1;
	}
      break;

    case sve_shift_tsz_hsd:
      i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
      if (i == 0)
	return false;
      while (i != 1)
	{
	  i >>= 1;
	  variant += 1;
	}
      break;

    default:
      /* No mapping between instruction class and qualifiers.  */
      return true;
    }

  /* Copy the selected qualifier row into the decoded operands.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
  return true;
}
/* Decode the CODE according to OPCODE; fill INST.  Return 0 if the decoding
   fails, which means that CODE is not an instruction of OPCODE; otherwise
   return 1.

   If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
   determined and used to disassemble CODE; this is done just before the
   return.  */
3280
static bool
aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
		       aarch64_inst *inst, int noaliases_p,
		       aarch64_operand_error *errors)
{
  int i;

  DEBUG_TRACE ("enter with %s", opcode->name);

  assert (opcode && inst);

  /* Clear inst.  */
  memset (inst, '\0', sizeof (aarch64_inst));

  /* Check the base opcode.  */
  if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
    {
      DEBUG_TRACE ("base opcode match FAIL");
      goto decode_fail;
    }

  inst->opcode = opcode;
  inst->value = code;

  /* Assign operand codes and indexes.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      if (opcode->operands[i] == AARCH64_OPND_NIL)
	break;
      inst->operands[i].type = opcode->operands[i];
      inst->operands[i].idx = i;
    }

  /* Call the opcode decoder indicated by flags.  */
  if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
    {
      DEBUG_TRACE ("opcode flag-based decoder FAIL");
      goto decode_fail;
    }

  /* Possibly use the instruction class to determine the correct
     qualifier.  */
  if (!aarch64_decode_variant_using_iclass (inst))
    {
      DEBUG_TRACE ("iclass-based decoder FAIL");
      goto decode_fail;
    }

  /* Call operand decoders.  Each operand that has an extractor must
     extract successfully for the decode to proceed.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type;

      type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      opnd = &aarch64_operands[type];
      if (operand_has_extractor (opnd)
	  && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
					 errors)))
	{
	  DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
	  goto decode_fail;
	}
    }

  /* If the opcode has a verifier, then check it now.  */
  if (opcode->verifier
      && opcode->verifier (inst, code, 0, false, errors, NULL) != ERR_OK)
    {
      DEBUG_TRACE ("operand verifier FAIL");
      goto decode_fail;
    }

  /* Match the qualifiers.  */
  if (aarch64_match_operands_constraint (inst, NULL) == 1)
    {
      /* Arriving here, the CODE has been determined as a valid instruction
	 of OPCODE and *INST has been filled with information of this OPCODE
	 instruction.  Before the return, check if the instruction has any
	 alias and should be disassembled in the form of its alias instead.
	 If the answer is yes, *INST will be updated.  */
      if (!noaliases_p)
	determine_disassembling_preference (inst, errors);
      DEBUG_TRACE ("SUCCESS");
      return true;
    }
  else
    {
      DEBUG_TRACE ("constraint matching FAIL");
    }

  /* Common failure exit for every stage above.  */
 decode_fail:
  return false;
}
3377 \f
3378 /* This does some user-friendly fix-up to *INST. It is currently focus on
3379 the adjustment of qualifiers to help the printed instruction
3380 recognized/understood more easily. */
3381
3382 static void
3383 user_friendly_fixup (aarch64_inst *inst)
3384 {
3385 switch (inst->opcode->iclass)
3386 {
3387 case testbranch:
3388 /* TBNZ Xn|Wn, #uimm6, label
3389 Test and Branch Not Zero: conditionally jumps to label if bit number
3390 uimm6 in register Xn is not zero. The bit number implies the width of
3391 the register, which may be written and should be disassembled as Wn if
3392 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
3393 */
3394 if (inst->operands[1].imm.value < 32)
3395 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
3396 break;
3397 default: break;
3398 }
3399 }
3400
3401 /* Decode INSN and fill in *INST the instruction information. An alias
3402 opcode may be filled in *INSN if NOALIASES_P is FALSE. Return zero on
3403 success. */
3404
3405 enum err_type
3406 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
3407 bool noaliases_p,
3408 aarch64_operand_error *errors)
3409 {
3410 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
3411
3412 #ifdef DEBUG_AARCH64
3413 if (debug_dump)
3414 {
3415 const aarch64_opcode *tmp = opcode;
3416 printf ("\n");
3417 DEBUG_TRACE ("opcode lookup:");
3418 while (tmp != NULL)
3419 {
3420 aarch64_verbose (" %s", tmp->name);
3421 tmp = aarch64_find_next_opcode (tmp);
3422 }
3423 }
3424 #endif /* DEBUG_AARCH64 */
3425
3426 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
3427 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
3428 opcode field and value, apart from the difference that one of them has an
3429 extra field as part of the opcode, but such a field is used for operand
3430 encoding in other opcode(s) ('immh' in the case of the example). */
3431 while (opcode != NULL)
3432 {
3433 /* But only one opcode can be decoded successfully for, as the
3434 decoding routine will check the constraint carefully. */
3435 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
3436 return ERR_OK;
3437 opcode = aarch64_find_next_opcode (opcode);
3438 }
3439
3440 return ERR_UND;
3441 }
3442
3443 /* Return a short string to indicate a switch to STYLE. These strings
3444 will be embedded into the disassembled operand text (as produced by
3445 aarch64_print_operand), and then spotted in the print_operands function
3446 so that the disassembler output can be split by style. */
3447
3448 static const char *
3449 get_style_text (enum disassembler_style style)
3450 {
3451 static bool init = false;
3452 static char formats[16][4];
3453 unsigned num;
3454
3455 /* First time through we build a string for every possible format. This
3456 code relies on there being no more than 16 different styles (there's
3457 an assert below for this). */
3458 if (!init)
3459 {
3460 int i;
3461
3462 for (i = 0; i <= 0xf; ++i)
3463 {
3464 int res = snprintf (&formats[i][0], sizeof (formats[i]), "%c%x%c",
3465 STYLE_MARKER_CHAR, i, STYLE_MARKER_CHAR);
3466 assert (res == 3);
3467 }
3468
3469 init = true;
3470 }
3471
3472 /* Return the string that marks switching to STYLE. */
3473 num = (unsigned) style;
3474 assert (style <= 0xf);
3475 return formats[num];
3476 }
3477
3478 /* Callback used by aarch64_print_operand to apply STYLE to the
3479 disassembler output created from FMT and ARGS. The STYLER object holds
3480 any required state. Must return a pointer to a string (created from FMT
3481 and ARGS) that will continue to be valid until the complete disassembled
3482 instruction has been printed.
3483
3484 We return a string that includes two embedded style markers, the first,
3485 places at the start of the string, indicates a switch to STYLE, and the
3486 second, placed at the end of the string, indicates a switch back to the
3487 default text style.
3488
3489 Later, when we print the operand text we take care to collapse any
3490 adjacent style markers, and to ignore any style markers that appear at
3491 the very end of a complete operand string. */
3492
static const char *aarch64_apply_style (struct aarch64_styler *styler,
					enum disassembler_style style,
					const char *fmt,
					va_list args)
{
  int res;
  char *ptr, *tmp;
  struct obstack *stack = (struct obstack *) styler->state;
  va_list ap;

  /* These are the two strings for switching styles.  */
  const char *style_on = get_style_text (style);
  const char *style_off = get_style_text (dis_style_text);

  /* Calculate space needed once FMT and ARGS are expanded.  A copy of ARGS
     is consumed here; the original is used for the real formatting below.  */
  va_copy (ap, args);
  res = vsnprintf (NULL, 0, fmt, ap);
  va_end (ap);
  assert (res >= 0);

  /* Allocate space on the obstack for the expanded FMT and ARGS, as well
     as the two strings for switching styles, then write all of these
     strings onto the obstack.  The result lives on STACK, so it stays
     valid until the caller releases the obstack.  */
  ptr = (char *) obstack_alloc (stack, res + strlen (style_on)
				+ strlen (style_off) + 1);
  tmp = stpcpy (ptr, style_on);
  res = vsnprintf (tmp, (res + 1), fmt, args);
  assert (res >= 0);
  tmp += res;
  strcpy (tmp, style_off);

  return ptr;
}
3526
/* Print the operands of OPNDS (decoded for OPCODE at address PC) to
   INFO->STREAM, honouring the embedded style markers produced by
   aarch64_apply_style.  *HAS_NOTES is set when a note was printed.  */

static void
print_operands (bfd_vma pc, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds, struct disassemble_info *info,
		bool *has_notes)
{
  char *notes = NULL;
  int i, pcrel_p, num_printed;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Styled text fragments are accumulated on the obstack; it is freed as
     a whole at the end of this function.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
			     &info->target, &notes, cmt, sizeof (cmt),
			     arch_variant, &styler);

      /* Print the delimiter (taking account of omitted operand(s)).  */
      if (str[0] != '\0')
	(*info->fprintf_styled_func) (info->stream, dis_style_text, "%s",
				      num_printed++ == 0 ? "\t" : ", ");

      /* Print the operand.  */
      if (pcrel_p)
	(*info->print_address_func) (info->target, info);
      else
	{
	  /* This operand came from aarch64_print_operand, and will include
	     embedded strings indicating which style each character should
	     have.  In the following code we split the text based on
	     CURR_STYLE, and call the styled print callback to print each
	     block of text in the appropriate style.  */
	  char *start, *curr;
	  enum disassembler_style curr_style = dis_style_text;

	  start = curr = str;
	  do
	    {
	      /* A style marker is STYLE_MARKER_CHAR, a hex digit, then
		 another STYLE_MARKER_CHAR; flush pending text at either a
		 marker or the end of the string.  */
	      if (*curr == '\0'
		  || (*curr == STYLE_MARKER_CHAR
		      && ISXDIGIT (*(curr + 1))
		      && *(curr + 2) == STYLE_MARKER_CHAR))
		{
		  /* Output content between our START position and CURR.  */
		  int len = curr - start;
		  if (len > 0)
		    {
		      if ((*info->fprintf_styled_func) (info->stream,
							curr_style,
							"%.*s",
							len, start) < 0)
			break;
		    }

		  if (*curr == '\0')
		    break;

		  /* Skip over the initial STYLE_MARKER_CHAR.  */
		  ++curr;

		  /* Update the CURR_STYLE.  As there are less than 16
		     styles, it is possible, that if the input is corrupted
		     in some way, that we might set CURR_STYLE to an
		     invalid value.  Don't worry though, we check for this
		     situation.  */
		  if (*curr >= '0' && *curr <= '9')
		    curr_style = (enum disassembler_style) (*curr - '0');
		  else if (*curr >= 'a' && *curr <= 'f')
		    curr_style = (enum disassembler_style) (*curr - 'a' + 10);
		  else
		    curr_style = dis_style_text;

		  /* Check for an invalid style having been selected.  This
		     should never happen, but it doesn't hurt to be a
		     little paranoid.  */
		  if (curr_style > dis_style_comment_start)
		    curr_style = dis_style_text;

		  /* Skip the hex character, and the closing STYLE_MARKER_CHAR.  */
		  curr += 2;

		  /* Reset the START to after the style marker.  */
		  start = curr;
		}
	      else
		++curr;
	    }
	  while (true);
	}

      /* Print the comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	(*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
				      "\t// %s", cmt);
    }

  /* Notes are suppressed by the -M no-notes disassembler option.  */
  if (notes && !no_notes)
    {
      *has_notes = true;
      (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
				    " // note: %s", notes);
    }

  obstack_free (&content, NULL);
}
3652
3653 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
3654
3655 static void
3656 remove_dot_suffix (char *name, const aarch64_inst *inst)
3657 {
3658 char *ptr;
3659 size_t len;
3660
3661 ptr = strchr (inst->opcode->name, '.');
3662 assert (ptr && inst->cond);
3663 len = ptr - inst->opcode->name;
3664 assert (len < 8);
3665 strncpy (name, inst->opcode->name, len);
3666 name[len] = '\0';
3667 }
3668
3669 /* Print the instruction mnemonic name. */
3670
3671 static void
3672 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3673 {
3674 if (inst->opcode->flags & F_COND)
3675 {
3676 /* For instructions that are truly conditionally executed, e.g. b.cond,
3677 prepare the full mnemonic name with the corresponding condition
3678 suffix. */
3679 char name[8];
3680
3681 remove_dot_suffix (name, inst);
3682 (*info->fprintf_styled_func) (info->stream, dis_style_mnemonic,
3683 "%s.%s", name, inst->cond->names[0]);
3684 }
3685 else
3686 (*info->fprintf_styled_func) (info->stream, dis_style_mnemonic,
3687 "%s", inst->opcode->name);
3688 }
3689
3690 /* Decide whether we need to print a comment after the operands of
3691 instruction INST. */
3692
3693 static void
3694 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
3695 {
3696 if (inst->opcode->flags & F_COND)
3697 {
3698 char name[8];
3699 unsigned int i, num_conds;
3700
3701 remove_dot_suffix (name, inst);
3702 num_conds = ARRAY_SIZE (inst->cond->names);
3703 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
3704 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
3705 "%s %s.%s",
3706 i == 1 ? " //" : ",",
3707 name, inst->cond->names[i]);
3708 }
3709 }
3710
/* Build notes from verifiers into a string for printing.  DETAIL is the
   (non-fatal) finding recorded by a constraint verifier.  */

static void
print_verifier_notes (aarch64_operand_error *detail,
		      struct disassemble_info *info)
{
  /* Honour the -M no-notes disassembler option.  */
  if (no_notes)
    return;

  /* The output of the verifier cannot be a fatal error, otherwise the assembly
     would not have succeeded.  We can safely ignore these.  */
  assert (detail->non_fatal);

  (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
				" // note: ");
  switch (detail->kind)
    {
    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      (*info->fprintf_styled_func) (info->stream, dis_style_text,
				    _("this `%s' should have an immediately"
				      " preceding `%s'"),
				    detail->data[0].s, detail->data[1].s);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      (*info->fprintf_styled_func) (info->stream, dis_style_text,
				    _("expected `%s' after previous `%s'"),
				    detail->data[0].s, detail->data[1].s);
      break;

    default:
      /* Generic note: print the recorded error text, plus the 1-based
	 operand number when one was recorded.  */
      assert (detail->error);
      (*info->fprintf_styled_func) (info->stream, dis_style_text,
				    "%s", detail->error);
      if (detail->index >= 0)
	(*info->fprintf_styled_func) (info->stream, dis_style_text,
				      " at operand %d", detail->index + 1);
      break;
    }
}
3751
/* Print the instruction according to *INST: mnemonic, operands, condition
   comment, and any verifier note.  CODE is the raw instruction word and
   MISMATCH_DETAILS receives verifier findings.  */

static void
print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
		    const aarch64_insn code,
		    struct disassemble_info *info,
		    aarch64_operand_error *mismatch_details)
{
  bool has_notes = false;

  print_mnemonic_name (inst, info);
  print_operands (pc, inst->opcode, inst->operands, info, &has_notes);
  print_comment (inst, info);

  /* We've already printed a note, not enough space to print more so exit.
     Usually notes shouldn't overlap so it shouldn't happen that we have a note
     from a register and instruction at the same time.  */
  if (has_notes)
    return;

  /* Always run constraint verifiers, this is needed because constraints need to
     maintain a global state regardless of whether the instruction has the flag
     set or not.  */
  enum err_type result = verify_constraints (inst, code, pc, false,
					     mismatch_details, &insn_sequence);
  switch (result)
    {
    case ERR_VFI:
      /* A non-fatal verifier finding: print it as a note.  */
      print_verifier_notes (mismatch_details, info);
      break;
    case ERR_UND:
    case ERR_UNP:
    case ERR_NYI:
    default:
      break;
    }
}
3789
/* Entry-point of the instruction disassembler and printer.  Decodes WORD
   (the instruction at PC) and prints either the disassembly or a raw
   ".inst" directive when the word cannot be decoded.  */

static void
print_insn_aarch64_word (bfd_vma pc,
			 uint32_t word,
			 struct disassemble_info *info,
			 aarch64_operand_error *errors)
{
  /* Human-readable tags for each decode result, indexed by enum err_type.  */
  static const char *err_msg[ERR_NR_ENTRIES+1] =
    {
      [ERR_OK] = "_",
      [ERR_UND] = "undefined",
      [ERR_UNP] = "unpredictable",
      [ERR_NYI] = "NYI"
    };

  enum err_type ret;
  aarch64_inst inst;

  info->insn_info_valid = 1;
  info->branch_delay_insns = 0;
  info->data_size = 0;
  info->target = 0;
  info->target2 = 0;

  if (info->flags & INSN_HAS_RELOC)
    /* If the instruction has a reloc associated with it, then
       the offset field in the instruction will actually be the
       addend for the reloc.  (If we are using REL type relocs).
       In such cases, we can ignore the pc when computing
       addresses, since the addend is not currently pc-relative.  */
    pc = 0;

  ret = aarch64_decode_insn (word, &inst, no_aliases, errors);

  if (((word >> 21) & 0x3ff) == 1)
    {
      /* RESERVED for ALES.  */
      assert (ret != ERR_OK);
      ret = ERR_NYI;
    }

  switch (ret)
    {
    case ERR_UND:
    case ERR_UNP:
    case ERR_NYI:
      /* Handle undefined instructions: emit them as raw data via a
	 ".inst" directive, tagged with the decode result.  */
      info->insn_type = dis_noninsn;
      (*info->fprintf_styled_func) (info->stream,
				    dis_style_assembler_directive,
				    ".inst\t");
      (*info->fprintf_styled_func) (info->stream, dis_style_immediate,
				    "0x%08x", word);
      (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
				    " ; %s", err_msg[ret]);
      break;
    case ERR_OK:
      user_friendly_fixup (&inst);
      /* Record the branch kind in insn_type for consumers of the
	 disassemble_info branch bookkeeping.  */
      if (inst.opcode->iclass == condbranch
	  || inst.opcode->iclass == testbranch
	  || inst.opcode->iclass == compbranch)
	info->insn_type = dis_condbranch;
      else if (inst.opcode->iclass == branch_imm)
	info->insn_type = dis_jsr;
      print_aarch64_insn (pc, &inst, word, info, errors);
      break;
    default:
      abort ();
    }
}
3861
3862 /* Disallow mapping symbols ($x, $d etc) from
3863 being displayed in symbol relative addresses. */
3864
3865 bool
3866 aarch64_symbol_is_valid (asymbol * sym,
3867 struct disassemble_info * info ATTRIBUTE_UNUSED)
3868 {
3869 const char * name;
3870
3871 if (sym == NULL)
3872 return false;
3873
3874 name = bfd_asymbol_name (sym);
3875
3876 return name
3877 && (name[0] != '$'
3878 || (name[1] != 'x' && name[1] != 'd')
3879 || (name[2] != '\0' && name[2] != '.'));
3880 }
3881
3882 /* Print data bytes on INFO->STREAM. */
3883
3884 static void
3885 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3886 uint32_t word,
3887 struct disassemble_info *info,
3888 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
3889 {
3890 switch (info->bytes_per_chunk)
3891 {
3892 case 1:
3893 info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
3894 ".byte\t");
3895 info->fprintf_styled_func (info->stream, dis_style_immediate,
3896 "0x%02x", word);
3897 break;
3898 case 2:
3899 info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
3900 ".short\t");
3901 info->fprintf_styled_func (info->stream, dis_style_immediate,
3902 "0x%04x", word);
3903 break;
3904 case 4:
3905 info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
3906 ".word\t");
3907 info->fprintf_styled_func (info->stream, dis_style_immediate,
3908 "0x%08x", word);
3909 break;
3910 default:
3911 abort ();
3912 }
3913 }
3914
3915 /* Try to infer the code or data type from a symbol.
3916 Returns nonzero if *MAP_TYPE was set. */
3917
3918 static int
3919 get_sym_code_type (struct disassemble_info *info, int n,
3920 enum map_type *map_type)
3921 {
3922 asymbol * as;
3923 elf_symbol_type *es;
3924 unsigned int type;
3925 const char *name;
3926
3927 /* If the symbol is in a different section, ignore it. */
3928 if (info->section != NULL && info->section != info->symtab[n]->section)
3929 return false;
3930
3931 if (n >= info->symtab_size)
3932 return false;
3933
3934 as = info->symtab[n];
3935 if (bfd_asymbol_flavour (as) != bfd_target_elf_flavour)
3936 return false;
3937 es = (elf_symbol_type *) as;
3938
3939 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3940
3941 /* If the symbol has function type then use that. */
3942 if (type == STT_FUNC)
3943 {
3944 *map_type = MAP_INSN;
3945 return true;
3946 }
3947
3948 /* Check for mapping symbols. */
3949 name = bfd_asymbol_name(info->symtab[n]);
3950 if (name[0] == '$'
3951 && (name[1] == 'x' || name[1] == 'd')
3952 && (name[2] == '\0' || name[2] == '.'))
3953 {
3954 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3955 return true;
3956 }
3957
3958 return false;
3959 }
3960
3961 /* Set the feature bits in arch_variant in order to get the correct disassembly
3962 for the chosen architecture variant.
3963
3964 Currently we only restrict disassembly for Armv8-R and otherwise enable all
3965 non-R-profile features. */
3966 static void
3967 select_aarch64_variant (unsigned mach)
3968 {
3969 switch (mach)
3970 {
3971 case bfd_mach_aarch64_8R:
3972 AARCH64_SET_FEATURE (arch_variant, AARCH64_ARCH_V8R);
3973 break;
3974 default:
3975 arch_variant = (aarch64_feature_set) AARCH64_ALL_FEATURES;
3976 AARCH64_CLEAR_FEATURE (arch_variant, arch_variant, V8R);
3977 }
3978 }
3979
/* Entry-point of the AArch64 disassembler.  Disassemble (or dump as
   data) the bytes at PC, printing on INFO->STREAM, and return the
   number of bytes consumed, or -1 on a memory read error.  */

int
print_insn_aarch64 (bfd_vma pc,
		    struct disassemble_info *info)
{
  bfd_byte buffer[INSNLEN];
  int status;
  void (*printer) (bfd_vma, uint32_t, struct disassemble_info *,
		   aarch64_operand_error *);
  bool found = false;
  unsigned int size = 4;
  unsigned long data;
  aarch64_operand_error errors;
  /* Static: the architecture variant is selected exactly once per
     process, on the first call.  */
  static bool set_features;

  if (info->disassembler_options)
    {
      set_default_aarch64_dis_options (info);

      parse_aarch64_dis_options (info->disassembler_options);

      /* To avoid repeated parsing of these options, we remove them here.  */
      info->disassembler_options = NULL;
    }

  /* Select the feature set for INFO->MACH once; the result is cached
     in the file-scope arch_variant.  */
  if (!set_features)
    {
      select_aarch64_variant (info->mach);
      set_features = true;
    }

  /* Aarch64 instructions are always little-endian */
  info->endian_code = BFD_ENDIAN_LITTLE;

  /* Default to DATA.  A text section is required by the ABI to contain an
     INSN mapping symbol at the start.  A data section has no such
     requirement, hence if no mapping symbol is found the section must
     contain only data.  This however isn't very useful if the user has
     fully stripped the binaries.  If this is the case use the section
     attributes to determine the default.  If we have no section default to
     INSN as well, as we may be disassembling some raw bytes on a baremetal
     HEX file or similar.  */
  enum map_type type = MAP_DATA;
  if ((info->section && info->section->flags & SEC_CODE) || !info->section)
    type = MAP_INSN;

  /* First check the full symtab for a mapping symbol, even if there
     are no usable non-mapping symbols for this address.  */
  if (info->symtab_size != 0
      && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
    {
      int last_sym = -1;
      bfd_vma addr, section_vma = 0;
      bool can_use_search_opt_p;
      int n;

      /* Moving backwards through the section invalidates the cached
	 search position.  */
      if (pc <= last_mapping_addr)
	last_mapping_sym = -1;

      /* Start scanning at the start of the function, or wherever
	 we finished last time.  */
      n = info->symtab_pos + 1;

      /* If the last stop offset is different from the current one it means we
	 are disassembling a different glob of bytes.  As such the optimization
	 would not be safe and we should start over.  */
      can_use_search_opt_p = last_mapping_sym >= 0
	&& info->stop_offset == last_stop_offset;

      if (n >= last_mapping_sym && can_use_search_opt_p)
	n = last_mapping_sym;

      /* Look down while we haven't passed the location being disassembled.
	 The reason for this is that there's no defined order between a symbol
	 and an mapping symbol that may be at the same address.  We may have to
	 look at least one position ahead.  */
      for (; n < info->symtab_size; n++)
	{
	  addr = bfd_asymbol_value (info->symtab[n]);
	  if (addr > pc)
	    break;
	  if (get_sym_code_type (info, n, &type))
	    {
	      last_sym = n;
	      found = true;
	    }
	}

      if (!found)
	{
	  n = info->symtab_pos;
	  if (n >= last_mapping_sym && can_use_search_opt_p)
	    n = last_mapping_sym;

	  /* No mapping symbol found at this address.  Look backwards
	     for a preceeding one, but don't go pass the section start
	     otherwise a data section with no mapping symbol can pick up
	     a text mapping symbol of a preceeding section.  The documentation
	     says section can be NULL, in which case we will seek up all the
	     way to the top.  */
	  if (info->section)
	    section_vma = info->section->vma;

	  for (; n >= 0; n--)
	    {
	      addr = bfd_asymbol_value (info->symtab[n]);
	      if (addr < section_vma)
		break;

	      if (get_sym_code_type (info, n, &type))
		{
		  last_sym = n;
		  found = true;
		  break;
		}
	    }
	}

      /* Cache the scan result so the next call (normally for the
	 immediately following address) can resume from here instead of
	 rescanning the symbol table.  */
      last_mapping_sym = last_sym;
      last_type = type;
      last_stop_offset = info->stop_offset;

      /* Look a little bit ahead to see if we should print out
	 less than four bytes of data.  If there's a symbol,
	 mapping or otherwise, after two bytes then don't
	 print more.  */
      if (last_type == MAP_DATA)
	{
	  size = 4 - (pc & 3);
	  for (n = last_sym + 1; n < info->symtab_size; n++)
	    {
	      addr = bfd_asymbol_value (info->symtab[n]);
	      if (addr > pc)
		{
		  if (addr - pc < size)
		    size = addr - pc;
		  break;
		}
	    }
	  /* If the next symbol is after three bytes, we need to
	     print only part of the data, so that we can use either
	     .byte or .short.  */
	  if (size == 3)
	    size = (pc & 1) ? 1 : 2;
	}
    }
  else
    last_type = type;

  /* PR 10263: Disassemble data if requested to do so by the user.  */
  if (last_type == MAP_DATA && ((info->flags & DISASSEMBLE_DATA) == 0))
    {
      /* size was set above.  */
      info->bytes_per_chunk = size;
      info->display_endian = info->endian;
      printer = print_insn_data;
    }
  else
    {
      /* Instructions are always 4 bytes, read in their fixed
	 little-endian encoding.  */
      info->bytes_per_chunk = size = INSNLEN;
      info->display_endian = info->endian_code;
      printer = print_insn_aarch64_word;
    }

  status = (*info->read_memory_func) (pc, buffer, size, info);
  if (status != 0)
    {
      (*info->memory_error_func) (status, pc, info);
      return -1;
    }

  data = bfd_get_bits (buffer, size * 8,
		       info->display_endian == BFD_ENDIAN_BIG);

  (*printer) (pc, data, info, &errors);

  return size;
}
4159 \f
void
print_aarch64_disassembler_options (FILE *stream)
{
  /* Each message is a separate literal inside _() so the gettext
     tooling can extract it for translation.  None of them contain
     printf conversions, so fputs is equivalent to fprintf here.  */
  fputs (_("\n\
The following AARCH64 specific disassembler options are supported for use\n\
with the -M switch (multiple options should be separated by commas):\n"),
	 stream);

  fputs (_("\n\
no-aliases Don't print instruction aliases.\n"), stream);

  fputs (_("\n\
aliases Do print instruction aliases.\n"), stream);

  fputs (_("\n\
no-notes Don't print instruction notes.\n"), stream);

  fputs (_("\n\
notes Do print instruction notes.\n"), stream);

#ifdef DEBUG_AARCH64
  fputs (_("\n\
debug_dump Temp switch for debug trace.\n"), stream);
#endif /* DEBUG_AARCH64 */

  fputs (_("\n"), stream);
}