]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - opcodes/aarch64-dis.c
ltmain.sh: allow more flags at link-time
[thirdparty/binutils-gdb.git] / opcodes / aarch64-dis.c
1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2024 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdint.h>
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28 #include "safe-ctype.h"
29 #include "obstack.h"
30
31 #define obstack_chunk_alloc xmalloc
32 #define obstack_chunk_free free
33
34 #define INSNLEN 4
35
36 /* This character is used to encode style information within the output
37 buffers. See get_style_text and print_operands for more details. */
38 #define STYLE_MARKER_CHAR '\002'
39
/* Cached mapping symbol state.  Classifies the region of the section the
   disassembler is currently in.  */
enum map_type
{
  MAP_INSN,	/* Region contains instructions.  */
  MAP_DATA	/* Region contains data.  */
};
46
static aarch64_feature_set arch_variant;	/* See select_aarch64_variant.  */
static enum map_type last_type;			/* Last mapping-symbol region type seen.  */
static int last_mapping_sym = -1;		/* Index of the last mapping symbol used.  */
static bfd_vma last_stop_offset = 0;
static bfd_vma last_mapping_addr = 0;

/* Other options */
static int no_aliases = 0;	/* If set disassemble as most general inst.  */

static int no_notes = 1;	/* If set do not print disassemble notes in the
				   output as comments.  */

/* Currently active instruction sequence.  */
static aarch64_instr_sequence insn_sequence;
60
/* Reset disassembler options to their defaults.  Currently a no-op: the
   file-scope option variables above carry their default values from their
   initialisers, so there is nothing to restore here.  */
static void
set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
{
}
65
66 static void
67 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
68 {
69 /* Try to match options that are simple flags */
70 if (startswith (option, "no-aliases"))
71 {
72 no_aliases = 1;
73 return;
74 }
75
76 if (startswith (option, "aliases"))
77 {
78 no_aliases = 0;
79 return;
80 }
81
82 if (startswith (option, "no-notes"))
83 {
84 no_notes = 1;
85 return;
86 }
87
88 if (startswith (option, "notes"))
89 {
90 no_notes = 0;
91 return;
92 }
93
94 #ifdef DEBUG_AARCH64
95 if (startswith (option, "debug_dump"))
96 {
97 debug_dump = 1;
98 return;
99 }
100 #endif /* DEBUG_AARCH64 */
101
102 /* Invalid option. */
103 opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
104 }
105
106 static void
107 parse_aarch64_dis_options (const char *options)
108 {
109 const char *option_end;
110
111 if (options == NULL)
112 return;
113
114 while (*options != '\0')
115 {
116 /* Skip empty options. */
117 if (*options == ',')
118 {
119 options++;
120 continue;
121 }
122
123 /* We know that *options is neither NUL or a comma. */
124 option_end = options + 1;
125 while (*option_end != ',' && *option_end != '\0')
126 option_end++;
127
128 parse_aarch64_dis_option (options, option_end - options);
129
130 /* Go on to the next one. If option_end points to a comma, it
131 will be skipped above. */
132 options = option_end;
133 }
134 }
135 \f
136 /* Functions doing the instruction disassembling. */
137
138 /* The unnamed arguments consist of the number of fields and information about
139 these fields where the VALUE will be extracted from CODE and returned.
140 MASK can be zero or the base mask of the opcode.
141
142 N.B. the fields are required to be in such an order than the most signficant
143 field for VALUE comes the first, e.g. the <index> in
144 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
145 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
146 the order of H, L, M. */
147
148 aarch64_insn
149 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
150 {
151 uint32_t num;
152 const aarch64_field *field;
153 enum aarch64_field_kind kind;
154 va_list va;
155
156 va_start (va, mask);
157 num = va_arg (va, uint32_t);
158 assert (num <= 5);
159 aarch64_insn value = 0x0;
160 while (num--)
161 {
162 kind = va_arg (va, enum aarch64_field_kind);
163 field = &fields[kind];
164 value <<= field->width;
165 value |= extract_field (kind, code, mask);
166 }
167 va_end (va);
168 return value;
169 }
170
171 /* Extract the value of all fields in SELF->fields after START from
172 instruction CODE. The least significant bit comes from the final field. */
173
174 static aarch64_insn
175 extract_all_fields_after (const aarch64_operand *self, unsigned int start,
176 aarch64_insn code)
177 {
178 aarch64_insn value;
179 unsigned int i;
180 enum aarch64_field_kind kind;
181
182 value = 0;
183 for (i = start;
184 i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
185 {
186 kind = self->fields[i];
187 value <<= fields[kind].width;
188 value |= extract_field (kind, code, 0);
189 }
190 return value;
191 }
192
193 /* Extract the value of all fields in SELF->fields from instruction CODE.
194 The least significant bit comes from the final field. */
195
196 static aarch64_insn
197 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
198 {
199 return extract_all_fields_after (self, 0, code);
200 }
201
202 /* Sign-extend bit I of VALUE. */
203 static inline uint64_t
204 sign_extend (aarch64_insn value, unsigned i)
205 {
206 uint64_t ret, sign;
207
208 assert (i < 32);
209 ret = value;
210 sign = (uint64_t) 1 << i;
211 return ((ret & (sign + sign - 1)) ^ sign) - sign;
212 }
213
214 /* N.B. the following inline helpfer functions create a dependency on the
215 order of operand qualifier enumerators. */
216
217 /* Given VALUE, return qualifier for a general purpose register. */
218 static inline enum aarch64_opnd_qualifier
219 get_greg_qualifier_from_value (aarch64_insn value)
220 {
221 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
222 if (value <= 0x1
223 && aarch64_get_qualifier_standard_value (qualifier) == value)
224 return qualifier;
225 return AARCH64_OPND_QLF_ERR;
226 }
227
228 /* Given VALUE, return qualifier for a vector register. This does not support
229 decoding instructions that accept the 2H vector type. */
230
231 static inline enum aarch64_opnd_qualifier
232 get_vreg_qualifier_from_value (aarch64_insn value)
233 {
234 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
235
236 /* Instructions using vector type 2H should not call this function. Skip over
237 the 2H qualifier. */
238 if (qualifier >= AARCH64_OPND_QLF_V_2H)
239 qualifier += 1;
240
241 if (value <= 0x8
242 && aarch64_get_qualifier_standard_value (qualifier) == value)
243 return qualifier;
244 return AARCH64_OPND_QLF_ERR;
245 }
246
247 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
248 static inline enum aarch64_opnd_qualifier
249 get_sreg_qualifier_from_value (aarch64_insn value)
250 {
251 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
252
253 if (value <= 0x4
254 && aarch64_get_qualifier_standard_value (qualifier) == value)
255 return qualifier;
256 return AARCH64_OPND_QLF_ERR;
257 }
258
259 /* Given the instruction in *INST which is probably half way through the
260 decoding and our caller wants to know the expected qualifier for operand
261 I. Return such a qualifier if we can establish it; otherwise return
262 AARCH64_OPND_QLF_NIL. */
263
264 static aarch64_opnd_qualifier_t
265 get_expected_qualifier (const aarch64_inst *inst, int i)
266 {
267 aarch64_opnd_qualifier_seq_t qualifiers;
268 /* Should not be called if the qualifier is known. */
269 if (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL)
270 {
271 int invalid_count;
272 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
273 i, qualifiers, &invalid_count))
274 return qualifiers[i];
275 else
276 return AARCH64_OPND_QLF_NIL;
277 }
278 else
279 return AARCH64_OPND_QLF_ERR;
280 }
281
282 /* Operand extractors. */
283
284 bool
285 aarch64_ext_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
286 aarch64_opnd_info *info ATTRIBUTE_UNUSED,
287 const aarch64_insn code ATTRIBUTE_UNUSED,
288 const aarch64_inst *inst ATTRIBUTE_UNUSED,
289 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
290 {
291 return true;
292 }
293
294 bool
295 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
296 const aarch64_insn code,
297 const aarch64_inst *inst ATTRIBUTE_UNUSED,
298 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
299 {
300 info->reg.regno = (extract_field (self->fields[0], code, 0)
301 + get_operand_specific_data (self));
302 return true;
303 }
304
305 bool
306 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
307 const aarch64_insn code ATTRIBUTE_UNUSED,
308 const aarch64_inst *inst ATTRIBUTE_UNUSED,
309 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
310 {
311 assert (info->idx == 1
312 || info->idx == 2
313 || info->idx == 3
314 || info->idx == 5);
315
316 unsigned prev_regno = inst->operands[info->idx - 1].reg.regno;
317 info->reg.regno = (prev_regno == 0x1f) ? 0x1f
318 : prev_regno + 1;
319 return true;
320 }
321
322 /* e.g. IC <ic_op>{, <Xt>}. */
323 bool
324 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
325 const aarch64_insn code,
326 const aarch64_inst *inst ATTRIBUTE_UNUSED,
327 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
328 {
329 info->reg.regno = extract_field (self->fields[0], code, 0);
330 assert (info->idx == 1
331 && (aarch64_get_operand_class (inst->operands[0].type)
332 == AARCH64_OPND_CLASS_SYSTEM));
333 /* This will make the constraint checking happy and more importantly will
334 help the disassembler determine whether this operand is optional or
335 not. */
336 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
337
338 return true;
339 }
340
341 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
342 bool
343 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
344 const aarch64_insn code,
345 const aarch64_inst *inst ATTRIBUTE_UNUSED,
346 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
347 {
348 /* regno */
349 info->reglane.regno = extract_field (self->fields[0], code,
350 inst->opcode->mask);
351
352 /* Index and/or type. */
353 if (inst->opcode->iclass == asisdone
354 || inst->opcode->iclass == asimdins)
355 {
356 if (info->type == AARCH64_OPND_En
357 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
358 {
359 unsigned shift;
360 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
361 assert (info->idx == 1); /* Vn */
362 aarch64_insn value = extract_field (FLD_imm4_11, code, 0);
363 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
364 info->qualifier = get_expected_qualifier (inst, info->idx);
365 if (info->qualifier == AARCH64_OPND_QLF_ERR)
366 return 0;
367 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
368 info->reglane.index = value >> shift;
369 }
370 else
371 {
372 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
373 imm5<3:0> <V>
374 0000 RESERVED
375 xxx1 B
376 xx10 H
377 x100 S
378 1000 D */
379 int pos = -1;
380 aarch64_insn value = extract_field (FLD_imm5, code, 0);
381 while (++pos <= 3 && (value & 0x1) == 0)
382 value >>= 1;
383 if (pos > 3)
384 return false;
385 info->qualifier = get_sreg_qualifier_from_value (pos);
386 if (info->qualifier == AARCH64_OPND_QLF_ERR)
387 return 0;
388 info->reglane.index = (unsigned) (value >> 1);
389 }
390 }
391 else if (inst->opcode->iclass == dotproduct)
392 {
393 /* Need information in other operand(s) to help decoding. */
394 info->qualifier = get_expected_qualifier (inst, info->idx);
395 if (info->qualifier == AARCH64_OPND_QLF_ERR)
396 return 0;
397 switch (info->qualifier)
398 {
399 case AARCH64_OPND_QLF_S_4B:
400 case AARCH64_OPND_QLF_S_2H:
401 /* L:H */
402 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
403 info->reglane.regno &= 0x1f;
404 break;
405 case AARCH64_OPND_QLF_S_2B:
406 /* h:l:m */
407 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
408 FLD_M);
409 info->reglane.regno &= 0xf;
410 break;
411 default:
412 return false;
413 }
414 }
415 else if (inst->opcode->iclass == cryptosm3)
416 {
417 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
418 info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
419 }
420 else
421 {
422 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
423 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
424
425 /* Need information in other operand(s) to help decoding. */
426 info->qualifier = get_expected_qualifier (inst, info->idx);
427 if (info->qualifier == AARCH64_OPND_QLF_ERR)
428 return 0;
429 switch (info->qualifier)
430 {
431 case AARCH64_OPND_QLF_S_B:
432 /* H:imm3 */
433 info->reglane.index = extract_fields (code, 0, 2, FLD_H,
434 FLD_imm3_19);
435 info->reglane.regno &= 0x7;
436 break;
437
438 case AARCH64_OPND_QLF_S_H:
439 case AARCH64_OPND_QLF_S_2B:
440 if (info->type == AARCH64_OPND_Em16)
441 {
442 /* h:l:m */
443 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
444 FLD_M);
445 info->reglane.regno &= 0xf;
446 }
447 else
448 {
449 /* h:l */
450 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
451 }
452 break;
453 case AARCH64_OPND_QLF_S_S:
454 case AARCH64_OPND_QLF_S_4B:
455 /* h:l */
456 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
457 break;
458 case AARCH64_OPND_QLF_S_D:
459 /* H */
460 info->reglane.index = extract_field (FLD_H, code, 0);
461 break;
462 default:
463 return false;
464 }
465
466 if (inst->opcode->op == OP_FCMLA_ELEM
467 && info->qualifier != AARCH64_OPND_QLF_S_H)
468 {
469 /* Complex operand takes two elements. */
470 if (info->reglane.index & 1)
471 return false;
472 info->reglane.index /= 2;
473 }
474 }
475
476 return true;
477 }
478
479 bool
480 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
481 const aarch64_insn code,
482 const aarch64_inst *inst ATTRIBUTE_UNUSED,
483 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
484 {
485 /* R */
486 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
487 /* len */
488 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
489 info->reglist.stride = 1;
490 return true;
491 }
492
493 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
494 bool
495 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
496 aarch64_opnd_info *info, const aarch64_insn code,
497 const aarch64_inst *inst,
498 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
499 {
500 aarch64_insn value;
501 /* Number of elements in each structure to be loaded/stored. */
502 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
503
504 struct
505 {
506 unsigned is_reserved;
507 unsigned num_regs;
508 unsigned num_elements;
509 } data [] =
510 { {0, 4, 4},
511 {1, 4, 4},
512 {0, 4, 1},
513 {0, 4, 2},
514 {0, 3, 3},
515 {1, 3, 3},
516 {0, 3, 1},
517 {0, 1, 1},
518 {0, 2, 2},
519 {1, 2, 2},
520 {0, 2, 1},
521 };
522
523 /* Rt */
524 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
525 /* opcode */
526 value = extract_field (FLD_opcode, code, 0);
527 /* PR 21595: Check for a bogus value. */
528 if (value >= ARRAY_SIZE (data))
529 return false;
530 if (expected_num != data[value].num_elements || data[value].is_reserved)
531 return false;
532 info->reglist.num_regs = data[value].num_regs;
533 info->reglist.stride = 1;
534
535 return true;
536 }
537
538 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
539 lanes instructions. */
540 bool
541 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
542 aarch64_opnd_info *info, const aarch64_insn code,
543 const aarch64_inst *inst,
544 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
545 {
546 aarch64_insn value;
547
548 /* Rt */
549 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
550 /* S */
551 value = extract_field (FLD_S, code, 0);
552
553 /* Number of registers is equal to the number of elements in
554 each structure to be loaded/stored. */
555 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
556 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
557
558 /* Except when it is LD1R. */
559 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
560 info->reglist.num_regs = 2;
561
562 info->reglist.stride = 1;
563 return true;
564 }
565
566 /* Decode AdvSIMD vector register list for AdvSIMD lut instructions.
567 The number of of registers in the list is determined by the opcode
568 flag. */
569 bool
570 aarch64_ext_lut_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
571 const aarch64_insn code,
572 const aarch64_inst *inst ATTRIBUTE_UNUSED,
573 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
574 {
575 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
576 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
577 info->reglist.stride = 1;
578 return true;
579 }
580
581 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
582 load/store single element instructions. */
583 bool
584 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
585 aarch64_opnd_info *info, const aarch64_insn code,
586 const aarch64_inst *inst ATTRIBUTE_UNUSED,
587 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
588 {
589 aarch64_field field = {0, 0};
590 aarch64_insn QSsize; /* fields Q:S:size. */
591 aarch64_insn opcodeh2; /* opcode<2:1> */
592
593 /* Rt */
594 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
595
596 /* Decode the index, opcode<2:1> and size. */
597 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
598 opcodeh2 = extract_field_2 (&field, code, 0);
599 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
600 switch (opcodeh2)
601 {
602 case 0x0:
603 info->qualifier = AARCH64_OPND_QLF_S_B;
604 /* Index encoded in "Q:S:size". */
605 info->reglist.index = QSsize;
606 break;
607 case 0x1:
608 if (QSsize & 0x1)
609 /* UND. */
610 return false;
611 info->qualifier = AARCH64_OPND_QLF_S_H;
612 /* Index encoded in "Q:S:size<1>". */
613 info->reglist.index = QSsize >> 1;
614 break;
615 case 0x2:
616 if ((QSsize >> 1) & 0x1)
617 /* UND. */
618 return false;
619 if ((QSsize & 0x1) == 0)
620 {
621 info->qualifier = AARCH64_OPND_QLF_S_S;
622 /* Index encoded in "Q:S". */
623 info->reglist.index = QSsize >> 2;
624 }
625 else
626 {
627 if (extract_field (FLD_S, code, 0))
628 /* UND */
629 return false;
630 info->qualifier = AARCH64_OPND_QLF_S_D;
631 /* Index encoded in "Q". */
632 info->reglist.index = QSsize >> 3;
633 }
634 break;
635 default:
636 return false;
637 }
638
639 info->reglist.has_index = 1;
640 info->reglist.num_regs = 0;
641 info->reglist.stride = 1;
642 /* Number of registers is equal to the number of elements in
643 each structure to be loaded/stored. */
644 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
645 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
646
647 return true;
648 }
649
650 /* Decode fields immh:immb and/or Q for e.g.
651 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
652 or SSHR <V><d>, <V><n>, #<shift>. */
653
654 bool
655 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
656 aarch64_opnd_info *info, const aarch64_insn code,
657 const aarch64_inst *inst,
658 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
659 {
660 int pos;
661 aarch64_insn Q, imm, immh;
662 enum aarch64_insn_class iclass = inst->opcode->iclass;
663
664 immh = extract_field (FLD_immh, code, 0);
665 if (immh == 0)
666 return false;
667 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
668 pos = 4;
669 /* Get highest set bit in immh. */
670 while (--pos >= 0 && (immh & 0x8) == 0)
671 immh <<= 1;
672
673 assert ((iclass == asimdshf || iclass == asisdshf)
674 && (info->type == AARCH64_OPND_IMM_VLSR
675 || info->type == AARCH64_OPND_IMM_VLSL));
676
677 if (iclass == asimdshf)
678 {
679 Q = extract_field (FLD_Q, code, 0);
680 /* immh Q <T>
681 0000 x SEE AdvSIMD modified immediate
682 0001 0 8B
683 0001 1 16B
684 001x 0 4H
685 001x 1 8H
686 01xx 0 2S
687 01xx 1 4S
688 1xxx 0 RESERVED
689 1xxx 1 2D */
690 info->qualifier =
691 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
692 if (info->qualifier == AARCH64_OPND_QLF_ERR)
693 return false;
694 }
695 else
696 {
697 info->qualifier = get_sreg_qualifier_from_value (pos);
698 if (info->qualifier == AARCH64_OPND_QLF_ERR)
699 return 0;
700 }
701
702 if (info->type == AARCH64_OPND_IMM_VLSR)
703 /* immh <shift>
704 0000 SEE AdvSIMD modified immediate
705 0001 (16-UInt(immh:immb))
706 001x (32-UInt(immh:immb))
707 01xx (64-UInt(immh:immb))
708 1xxx (128-UInt(immh:immb)) */
709 info->imm.value = (16 << pos) - imm;
710 else
711 /* immh:immb
712 immh <shift>
713 0000 SEE AdvSIMD modified immediate
714 0001 (UInt(immh:immb)-8)
715 001x (UInt(immh:immb)-16)
716 01xx (UInt(immh:immb)-32)
717 1xxx (UInt(immh:immb)-64) */
718 info->imm.value = imm - (8 << pos);
719
720 return true;
721 }
722
723 /* Decode shift immediate for e.g. sshr (imm). */
724 bool
725 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
726 aarch64_opnd_info *info, const aarch64_insn code,
727 const aarch64_inst *inst ATTRIBUTE_UNUSED,
728 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
729 {
730 int64_t imm;
731 aarch64_insn val;
732 val = extract_field (FLD_size, code, 0);
733 switch (val)
734 {
735 case 0: imm = 8; break;
736 case 1: imm = 16; break;
737 case 2: imm = 32; break;
738 default: return false;
739 }
740 info->imm.value = imm;
741 return true;
742 }
743
744 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
745 value in the field(s) will be extracted as unsigned immediate value. */
746 bool
747 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
748 const aarch64_insn code,
749 const aarch64_inst *inst,
750 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
751 {
752 uint64_t imm;
753
754 imm = extract_all_fields (self, code);
755
756 if (operand_need_sign_extension (self))
757 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
758
759 if (operand_need_shift_by_two (self))
760 imm <<= 2;
761 else if (operand_need_shift_by_three (self))
762 imm <<= 3;
763 else if (operand_need_shift_by_four (self))
764 imm <<= 4;
765
766 if (info->type == AARCH64_OPND_ADDR_ADRP)
767 imm <<= 12;
768
769 if (inst->operands[0].type == AARCH64_OPND_PSTATEFIELD
770 && inst->operands[0].sysreg.flags & F_IMM_IN_CRM)
771 imm &= PSTATE_DECODE_CRM_IMM (inst->operands[0].sysreg.flags);
772
773 info->imm.value = imm;
774 return true;
775 }
776
777 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
778 bool
779 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
780 const aarch64_insn code,
781 const aarch64_inst *inst ATTRIBUTE_UNUSED,
782 aarch64_operand_error *errors)
783 {
784 aarch64_ext_imm (self, info, code, inst, errors);
785 info->shifter.kind = AARCH64_MOD_LSL;
786 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
787 return true;
788 }
789
790 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
791 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
792 bool
793 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
794 aarch64_opnd_info *info,
795 const aarch64_insn code,
796 const aarch64_inst *inst ATTRIBUTE_UNUSED,
797 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
798 {
799 uint64_t imm;
800 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
801 aarch64_field field = {0, 0};
802
803 assert (info->idx == 1);
804
805 if (info->type == AARCH64_OPND_SIMD_FPIMM)
806 info->imm.is_fp = 1;
807
808 /* a:b:c:d:e:f:g:h */
809 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
810 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
811 {
812 /* Either MOVI <Dd>, #<imm>
813 or MOVI <Vd>.2D, #<imm>.
814 <imm> is a 64-bit immediate
815 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
816 encoded in "a:b:c:d:e:f:g:h". */
817 int i;
818 unsigned abcdefgh = imm;
819 for (imm = 0ull, i = 0; i < 8; i++)
820 if (((abcdefgh >> i) & 0x1) != 0)
821 imm |= 0xffull << (8 * i);
822 }
823 info->imm.value = imm;
824
825 /* cmode */
826 info->qualifier = get_expected_qualifier (inst, info->idx);
827 if (info->qualifier == AARCH64_OPND_QLF_ERR)
828 return 0;
829 switch (info->qualifier)
830 {
831 case AARCH64_OPND_QLF_NIL:
832 /* no shift */
833 info->shifter.kind = AARCH64_MOD_NONE;
834 return 1;
835 case AARCH64_OPND_QLF_LSL:
836 /* shift zeros */
837 info->shifter.kind = AARCH64_MOD_LSL;
838 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
839 {
840 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
841 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
842 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
843 default: return false;
844 }
845 /* 00: 0; 01: 8; 10:16; 11:24. */
846 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
847 break;
848 case AARCH64_OPND_QLF_MSL:
849 /* shift ones */
850 info->shifter.kind = AARCH64_MOD_MSL;
851 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
852 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
853 break;
854 default:
855 return false;
856 }
857
858 return true;
859 }
860
861 /* Decode an 8-bit floating-point immediate. */
862 bool
863 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
864 const aarch64_insn code,
865 const aarch64_inst *inst ATTRIBUTE_UNUSED,
866 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
867 {
868 info->imm.value = extract_all_fields (self, code);
869 info->imm.is_fp = 1;
870 return true;
871 }
872
873 /* Decode a 1-bit rotate immediate (#90 or #270). */
874 bool
875 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
876 const aarch64_insn code,
877 const aarch64_inst *inst ATTRIBUTE_UNUSED,
878 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
879 {
880 uint64_t rot = extract_field (self->fields[0], code, 0);
881 assert (rot < 2U);
882 info->imm.value = rot * 180 + 90;
883 return true;
884 }
885
886 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
887 bool
888 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
889 const aarch64_insn code,
890 const aarch64_inst *inst ATTRIBUTE_UNUSED,
891 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
892 {
893 uint64_t rot = extract_field (self->fields[0], code, 0);
894 assert (rot < 4U);
895 info->imm.value = rot * 90;
896 return true;
897 }
898
899 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
900 bool
901 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
902 aarch64_opnd_info *info, const aarch64_insn code,
903 const aarch64_inst *inst ATTRIBUTE_UNUSED,
904 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
905 {
906 info->imm.value = 64- extract_field (FLD_scale, code, 0);
907 return true;
908 }
909
910 /* Decode arithmetic immediate for e.g.
911 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
912 bool
913 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
914 aarch64_opnd_info *info, const aarch64_insn code,
915 const aarch64_inst *inst ATTRIBUTE_UNUSED,
916 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
917 {
918 aarch64_insn value;
919
920 info->shifter.kind = AARCH64_MOD_LSL;
921 /* shift */
922 value = extract_field (FLD_shift, code, 0);
923 if (value >= 2)
924 return false;
925 info->shifter.amount = value ? 12 : 0;
926 /* imm12 (unsigned) */
927 info->imm.value = extract_field (FLD_imm12, code, 0);
928
929 return true;
930 }
931
932 /* Return true if VALUE is a valid logical immediate encoding, storing the
933 decoded value in *RESULT if so. ESIZE is the number of bytes in the
934 decoded immediate. */
935 static bool
936 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
937 {
938 uint64_t imm, mask;
939 uint32_t N, R, S;
940 unsigned simd_size;
941
942 /* value is N:immr:imms. */
943 S = value & 0x3f;
944 R = (value >> 6) & 0x3f;
945 N = (value >> 12) & 0x1;
946
947 /* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
948 (in other words, right rotated by R), then replicated. */
949 if (N != 0)
950 {
951 simd_size = 64;
952 mask = 0xffffffffffffffffull;
953 }
954 else
955 {
956 switch (S)
957 {
958 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
959 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
960 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
961 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
962 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
963 default: return false;
964 }
965 mask = (1ull << simd_size) - 1;
966 /* Top bits are IGNORED. */
967 R &= simd_size - 1;
968 }
969
970 if (simd_size > esize * 8)
971 return false;
972
973 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
974 if (S == simd_size - 1)
975 return false;
976 /* S+1 consecutive bits to 1. */
977 /* NOTE: S can't be 63 due to detection above. */
978 imm = (1ull << (S + 1)) - 1;
979 /* Rotate to the left by simd_size - R. */
980 if (R != 0)
981 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
982 /* Replicate the value according to SIMD size. */
983 switch (simd_size)
984 {
985 case 2: imm = (imm << 2) | imm;
986 /* Fall through. */
987 case 4: imm = (imm << 4) | imm;
988 /* Fall through. */
989 case 8: imm = (imm << 8) | imm;
990 /* Fall through. */
991 case 16: imm = (imm << 16) | imm;
992 /* Fall through. */
993 case 32: imm = (imm << 32) | imm;
994 /* Fall through. */
995 case 64: break;
996 default: return 0;
997 }
998
999 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
1000
1001 return true;
1002 }
1003
1004 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
1005 bool
1006 aarch64_ext_limm (const aarch64_operand *self,
1007 aarch64_opnd_info *info, const aarch64_insn code,
1008 const aarch64_inst *inst,
1009 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1010 {
1011 uint32_t esize;
1012 aarch64_insn value;
1013
1014 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
1015 self->fields[2]);
1016 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1017 return decode_limm (esize, value, &info->imm.value);
1018 }
1019
1020 /* Decode a logical immediate for the BIC alias of AND (etc.). */
1021 bool
1022 aarch64_ext_inv_limm (const aarch64_operand *self,
1023 aarch64_opnd_info *info, const aarch64_insn code,
1024 const aarch64_inst *inst,
1025 aarch64_operand_error *errors)
1026 {
1027 if (!aarch64_ext_limm (self, info, code, inst, errors))
1028 return false;
1029 info->imm.value = ~info->imm.value;
1030 return true;
1031 }
1032
/* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
bool
aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
		aarch64_opnd_info *info,
		const aarch64_insn code, const aarch64_inst *inst,
		aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;

  /* Rt */
  info->reg.regno = extract_field (FLD_Rt, code, 0);

  /* size */
  value = extract_field (FLD_ldst_size, code, 0);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* Pair and load-literal forms: the 2-bit size field maps directly
	 onto the S/D/Q qualifiers; the remaining value (3) is an
	 unallocated encoding.  */
      enum aarch64_opnd_qualifier qualifier;
      switch (value)
	{
	case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
	case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
	case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
	default: return false;
	}
      info->qualifier = qualifier;
    }
  else
    {
      /* opc1:size -- 3 bits selecting B/H/S/D/Q; values above 0x4 are
	 invalid encodings.  */
      value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
      if (value > 0x4)
	return false;
      info->qualifier = get_sreg_qualifier_from_value (value);
      if (info->qualifier == AARCH64_OPND_QLF_ERR)
	return false;
    }

  return true;
}
1076
1077 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
1078 bool
1079 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
1080 aarch64_opnd_info *info,
1081 aarch64_insn code,
1082 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1083 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1084 {
1085 /* Rn */
1086 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1087 return true;
1088 }
1089
1090 /* Decode the address operand for rcpc3 instructions with optional load/store
1091 datasize offset, e.g. STILPP <Xs>, <Xt>, [<Xn|SP>{,#-16}]! and
1092 LIDAP <Xs>, <Xt>, [<Xn|SP>]{,#-16}. */
1093 bool
1094 aarch64_ext_rcpc3_addr_opt_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1095 aarch64_opnd_info *info,
1096 aarch64_insn code,
1097 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1098 aarch64_operand_error *err ATTRIBUTE_UNUSED)
1099 {
1100 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1101 if (!extract_field (FLD_opc2, code, 0))
1102 {
1103 info->addr.writeback = 1;
1104
1105 enum aarch64_opnd type;
1106 for (int i = 0; i < AARCH64_MAX_OPND_NUM; i++)
1107 {
1108 aarch64_opnd_info opnd = info[i];
1109 type = opnd.type;
1110 if (aarch64_operands[type].op_class == AARCH64_OPND_CLASS_ADDRESS)
1111 break;
1112 }
1113
1114 assert (aarch64_operands[type].op_class == AARCH64_OPND_CLASS_ADDRESS);
1115 int offset = calc_ldst_datasize (inst->operands);
1116
1117 switch (type)
1118 {
1119 case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB:
1120 case AARCH64_OPND_RCPC3_ADDR_PREIND_WB:
1121 info->addr.offset.imm = -offset;
1122 info->addr.preind = 1;
1123 break;
1124 case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND:
1125 case AARCH64_OPND_RCPC3_ADDR_POSTIND:
1126 info->addr.offset.imm = offset;
1127 info->addr.postind = 1;
1128 break;
1129 default:
1130 return false;
1131 }
1132 }
1133 return true;
1134 }
1135
1136 bool
1137 aarch64_ext_rcpc3_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1138 aarch64_opnd_info *info,
1139 aarch64_insn code,
1140 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1141 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1142 {
1143 info->qualifier = get_expected_qualifier (inst, info->idx);
1144 if (info->qualifier == AARCH64_OPND_QLF_ERR)
1145 return 0;
1146
1147 /* Rn */
1148 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1149
1150 /* simm9 */
1151 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1152 info->addr.offset.imm = sign_extend (imm, 8);
1153 return true;
1154 }
1155
1156 /* Decode the address operand for e.g.
1157 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
1158 bool
1159 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1160 aarch64_opnd_info *info,
1161 aarch64_insn code, const aarch64_inst *inst,
1162 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1163 {
1164 info->qualifier = get_expected_qualifier (inst, info->idx);
1165 if (info->qualifier == AARCH64_OPND_QLF_ERR)
1166 return 0;
1167
1168 /* Rn */
1169 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1170
1171 /* simm9 */
1172 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1173 info->addr.offset.imm = sign_extend (imm, 8);
1174 if (extract_field (self->fields[2], code, 0) == 1) {
1175 info->addr.writeback = 1;
1176 info->addr.preind = 1;
1177 }
1178 return true;
1179 }
1180
1181 /* Decode the address operand for e.g.
1182 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1183 bool
1184 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
1185 aarch64_opnd_info *info,
1186 aarch64_insn code, const aarch64_inst *inst,
1187 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1188 {
1189 aarch64_insn S, value;
1190
1191 /* Rn */
1192 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1193 /* Rm */
1194 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1195 /* option */
1196 value = extract_field (FLD_option, code, 0);
1197 info->shifter.kind =
1198 aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
1199 /* Fix-up the shifter kind; although the table-driven approach is
1200 efficient, it is slightly inflexible, thus needing this fix-up. */
1201 if (info->shifter.kind == AARCH64_MOD_UXTX)
1202 info->shifter.kind = AARCH64_MOD_LSL;
1203 /* S */
1204 S = extract_field (FLD_S, code, 0);
1205 if (S == 0)
1206 {
1207 info->shifter.amount = 0;
1208 info->shifter.amount_present = 0;
1209 }
1210 else
1211 {
1212 int size;
1213 /* Need information in other operand(s) to help achieve the decoding
1214 from 'S' field. */
1215 info->qualifier = get_expected_qualifier (inst, info->idx);
1216 if (info->qualifier == AARCH64_OPND_QLF_ERR)
1217 return 0;
1218 /* Get the size of the data element that is accessed, which may be
1219 different from that of the source register size, e.g. in strb/ldrb. */
1220 size = aarch64_get_qualifier_esize (info->qualifier);
1221 info->shifter.amount = get_logsz (size);
1222 info->shifter.amount_present = 1;
1223 }
1224
1225 return true;
1226 }
1227
1228 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
1229 bool
1230 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1231 aarch64_insn code, const aarch64_inst *inst,
1232 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1233 {
1234 aarch64_insn imm;
1235 info->qualifier = get_expected_qualifier (inst, info->idx);
1236 if (info->qualifier == AARCH64_OPND_QLF_ERR)
1237 return 0;
1238
1239 /* Rn */
1240 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1241 /* simm (imm9 or imm7) */
1242 imm = extract_field (self->fields[0], code, 0);
1243 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1244 if (self->fields[0] == FLD_imm7
1245 || info->qualifier == AARCH64_OPND_QLF_imm_tag)
1246 /* scaled immediate in ld/st pair instructions. */
1247 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1248 /* qualifier */
1249 if (inst->opcode->iclass == ldst_unscaled
1250 || inst->opcode->iclass == ldstnapair_offs
1251 || inst->opcode->iclass == ldstpair_off
1252 || inst->opcode->iclass == ldst_unpriv)
1253 info->addr.writeback = 0;
1254 else
1255 {
1256 /* pre/post- index */
1257 info->addr.writeback = 1;
1258 if (extract_field (self->fields[1], code, 0) == 1)
1259 info->addr.preind = 1;
1260 else
1261 info->addr.postind = 1;
1262 }
1263
1264 return true;
1265 }
1266
1267 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1268 bool
1269 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1270 aarch64_insn code,
1271 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1272 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1273 {
1274 int shift;
1275 info->qualifier = get_expected_qualifier (inst, info->idx);
1276 if (info->qualifier == AARCH64_OPND_QLF_ERR)
1277 return 0;
1278 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1279 /* Rn */
1280 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1281 /* uimm12 */
1282 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1283 return true;
1284 }
1285
1286 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1287 bool
1288 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1289 aarch64_insn code,
1290 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1291 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1292 {
1293 aarch64_insn imm;
1294
1295 info->qualifier = get_expected_qualifier (inst, info->idx);
1296 if (info->qualifier == AARCH64_OPND_QLF_ERR)
1297 return 0;
1298 /* Rn */
1299 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1300 /* simm10 */
1301 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1302 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1303 if (extract_field (self->fields[3], code, 0) == 1) {
1304 info->addr.writeback = 1;
1305 info->addr.preind = 1;
1306 }
1307 return true;
1308 }
1309
/* Decode the address operand for e.g.
   LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
bool
aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
			    aarch64_opnd_info *info,
			    aarch64_insn code, const aarch64_inst *inst,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The opcode dependent area stores the number of elements in
     each structure to be loaded/stored.  */
  int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;

  /* Rn */
  info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  /* Rm | #<amount> -- Rm == 31 (XZR) encodes the immediate post-index
     form, whose amount is implied by the transferred data size.  */
  info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
  if (info->addr.offset.regno == 31)
    {
      if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
	/* Special handling of loading single structure to all lane.  */
	info->addr.offset.imm = (is_ld1r ? 1
				 : inst->operands[0].reglist.num_regs)
	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
      else
	/* Amount = registers * element size * elements per register.  */
	info->addr.offset.imm = inst->operands[0].reglist.num_regs
	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
	  * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
    }
  else
    info->addr.offset.is_reg = 1;
  /* This addressing form always writes back.  */
  info->addr.writeback = 1;

  return true;
}
1344
1345 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1346 bool
1347 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1348 aarch64_opnd_info *info,
1349 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1350 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1351 {
1352 aarch64_insn value;
1353 /* cond */
1354 value = extract_field (FLD_cond, code, 0);
1355 info->cond = get_cond_from_value (value);
1356 return true;
1357 }
1358
1359 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1360 bool
1361 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1362 aarch64_opnd_info *info,
1363 aarch64_insn code,
1364 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1365 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1366 {
1367 /* op0:op1:CRn:CRm:op2 */
1368 info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1369 FLD_CRm, FLD_op2);
1370 info->sysreg.flags = 0;
1371
1372 /* If a system instruction, check which restrictions should be on the register
1373 value during decoding, these will be enforced then. */
1374 if (inst->opcode->iclass == ic_system)
1375 {
1376 /* Check to see if it's read-only, else check if it's write only.
1377 if it's both or unspecified don't care. */
1378 if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1379 info->sysreg.flags = F_REG_READ;
1380 else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1381 == F_SYS_WRITE)
1382 info->sysreg.flags = F_REG_WRITE;
1383 }
1384
1385 return true;
1386 }
1387
/* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
bool
aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 aarch64_opnd_info *info, aarch64_insn code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int i;
  aarch64_insn fld_crm = extract_field (FLD_CRm, code, 0);
  /* op1:op2 */
  info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
  /* Look the op1:op2 value up in the PSTATE field table; a match may
     additionally be constrained by bits of CRm (below).  */
  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
      {
	/* PSTATEFIELD name can be encoded partially in CRm[3:1].  */
	uint32_t flags = aarch64_pstatefields[i].flags;
	if ((flags & F_REG_IN_CRM)
	    && ((fld_crm & 0xe) != PSTATE_DECODE_CRM (flags)))
	  continue;
	info->sysreg.flags = flags;
	return true;
      }
  /* Reserved value in <pstatefield>.  */
  return false;
}
1413
/* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
bool
aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
		       aarch64_opnd_info *info,
		       aarch64_insn code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int i;
  aarch64_insn value;
  const aarch64_sys_ins_reg *sysins_ops;
  /* op0:op1:CRn:CRm:op2 */
  value = extract_fields (code, 0, 5,
			  FLD_op0, FLD_op1, FLD_CRn,
			  FLD_CRm, FLD_op2);

  /* Select which operation table to search by the operand type.  */
  switch (info->type)
    {
    case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
    case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
    case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
    case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
    case AARCH64_OPND_SYSREG_TLBIP: sysins_ops = aarch64_sys_regs_tlbi; break;
    case AARCH64_OPND_SYSREG_SR:
      sysins_ops = aarch64_sys_regs_sr;
      /* Let's remove op2 for rctx.  Refer to comments in the definition of
	 aarch64_sys_regs_sr[].  */
      value = value & ~(0x7);
      break;
    default: return false;
    }

  /* An encoding that matches no table entry is not a valid operation.  */
  for (i = 0; sysins_ops[i].name != NULL; ++i)
    if (sysins_ops[i].value == value)
      {
	info->sysins_op = sysins_ops + i;
	DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
		     info->sysins_op->name,
		     (unsigned)info->sysins_op->value,
		     aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
	return true;
      }

  return false;
}
1459
1460 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1461
1462 bool
1463 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1464 aarch64_opnd_info *info,
1465 aarch64_insn code,
1466 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1467 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1468 {
1469 /* CRm */
1470 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1471 return true;
1472 }
1473
1474 /* Decode the memory barrier option operand for DSB <option>nXS|#<imm>. */
1475
1476 bool
1477 aarch64_ext_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
1478 aarch64_opnd_info *info,
1479 aarch64_insn code,
1480 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1481 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1482 {
1483 /* For the DSB nXS barrier variant immediate is encoded in 2-bit field. */
1484 aarch64_insn field = extract_field (FLD_CRm_dsb_nxs, code, 0);
1485 info->barrier = aarch64_barrier_dsb_nxs_options + field;
1486 return true;
1487 }
1488
1489 /* Decode the prefetch operation option operand for e.g.
1490 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1491
1492 bool
1493 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1494 aarch64_opnd_info *info,
1495 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1496 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1497 {
1498 /* prfop in Rt */
1499 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1500 return true;
1501 }
1502
1503 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1504 to the matching name/value pair in aarch64_hint_options. */
1505
1506 bool
1507 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1508 aarch64_opnd_info *info,
1509 aarch64_insn code,
1510 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1511 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1512 {
1513 /* CRm:op2. */
1514 unsigned hint_number;
1515 int i;
1516
1517 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1518
1519 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1520 {
1521 if (hint_number == HINT_VAL (aarch64_hint_options[i].value))
1522 {
1523 info->hint_option = &(aarch64_hint_options[i]);
1524 return true;
1525 }
1526 }
1527
1528 return false;
1529 }
1530
1531 /* Decode the extended register operand for e.g.
1532 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1533 bool
1534 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1535 aarch64_opnd_info *info,
1536 aarch64_insn code,
1537 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1538 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1539 {
1540 aarch64_insn value;
1541
1542 /* Rm */
1543 info->reg.regno = extract_field (FLD_Rm, code, 0);
1544 /* option */
1545 value = extract_field (FLD_option, code, 0);
1546 info->shifter.kind =
1547 aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
1548 /* imm3 */
1549 info->shifter.amount = extract_field (FLD_imm3_10, code, 0);
1550
1551 /* This makes the constraint checking happy. */
1552 info->shifter.operator_present = 1;
1553
1554 /* Assume inst->operands[0].qualifier has been resolved. */
1555 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1556 info->qualifier = AARCH64_OPND_QLF_W;
1557 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1558 && (info->shifter.kind == AARCH64_MOD_UXTX
1559 || info->shifter.kind == AARCH64_MOD_SXTX))
1560 info->qualifier = AARCH64_OPND_QLF_X;
1561
1562 return true;
1563 }
1564
1565 /* Decode the shifted register operand for e.g.
1566 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1567 bool
1568 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1569 aarch64_opnd_info *info,
1570 aarch64_insn code,
1571 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1572 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1573 {
1574 aarch64_insn value;
1575
1576 /* Rm */
1577 info->reg.regno = extract_field (FLD_Rm, code, 0);
1578 /* shift */
1579 value = extract_field (FLD_shift, code, 0);
1580 info->shifter.kind =
1581 aarch64_get_operand_modifier_from_value (value, false /* extend_p */);
1582 if (info->shifter.kind == AARCH64_MOD_ROR
1583 && inst->opcode->iclass != log_shift)
1584 /* ROR is not available for the shifted register operand in arithmetic
1585 instructions. */
1586 return false;
1587 /* imm6 */
1588 info->shifter.amount = extract_field (FLD_imm6_10, code, 0);
1589
1590 /* This makes the constraint checking happy. */
1591 info->shifter.operator_present = 1;
1592
1593 return true;
1594 }
1595
1596 /* Decode the LSL-shifted register operand for e.g.
1597 ADDPT <Xd|SP>, <Xn|SP>, <Xm>{, LSL #<amount>}. */
1598 bool
1599 aarch64_ext_reg_lsl_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1600 aarch64_opnd_info *info,
1601 aarch64_insn code,
1602 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1603 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1604 {
1605 /* Rm */
1606 info->reg.regno = extract_field (FLD_Rm, code, 0);
1607 /* imm3 */
1608 info->shifter.kind = AARCH64_MOD_LSL;
1609 info->shifter.amount = extract_field (FLD_imm3_10, code, 0);
1610 return true;
1611 }
1612
/* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
   where <offset> is given by the OFFSET parameter and where <factor> is
   1 plus SELF's operand-dependent value.  fields[0] specifies the field
   that holds <base>.  */
static bool
aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
				 aarch64_opnd_info *info, aarch64_insn code,
				 int64_t offset)
{
  info->addr.base_regno = extract_field (self->fields[0], code, 0);
  /* Pre-scale the offset by the operand-specific multiplier.  */
  info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
  info->addr.offset.is_reg = false;
  info->addr.writeback = false;
  info->addr.preind = true;
  /* Only attach the MUL VL modifier when there is an offset to print;
     shifter.kind is deliberately left untouched for a zero offset.  */
  if (offset != 0)
    info->shifter.kind = AARCH64_MOD_MUL_VL;
  info->shifter.amount = 1;
  info->shifter.operator_present = (info->addr.offset.imm != 0);
  info->shifter.amount_present = false;
  return true;
}
1634
1635 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1636 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1637 SELF's operand-dependent value. fields[0] specifies the field that
1638 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1639 bool
1640 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1641 aarch64_opnd_info *info, aarch64_insn code,
1642 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1643 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1644 {
1645 int offset;
1646
1647 offset = extract_field (FLD_SVE_imm4, code, 0);
1648 offset = ((offset + 8) & 15) - 8;
1649 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1650 }
1651
1652 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1653 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1654 SELF's operand-dependent value. fields[0] specifies the field that
1655 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1656 bool
1657 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1658 aarch64_opnd_info *info, aarch64_insn code,
1659 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1660 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1661 {
1662 int offset;
1663
1664 offset = extract_field (FLD_SVE_imm6, code, 0);
1665 offset = (((offset + 32) & 63) - 32);
1666 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1667 }
1668
1669 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1670 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1671 SELF's operand-dependent value. fields[0] specifies the field that
1672 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1673 and imm3 fields, with imm3 being the less-significant part. */
1674 bool
1675 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1676 aarch64_opnd_info *info,
1677 aarch64_insn code,
1678 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1679 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1680 {
1681 int offset;
1682
1683 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3_10);
1684 offset = (((offset + 256) & 511) - 256);
1685 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1686 }
1687
1688 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1689 is given by the OFFSET parameter and where <shift> is SELF's operand-
1690 dependent value. fields[0] specifies the base register field <base>. */
1691 static bool
1692 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1693 aarch64_opnd_info *info, aarch64_insn code,
1694 int64_t offset)
1695 {
1696 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1697 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1698 info->addr.offset.is_reg = false;
1699 info->addr.writeback = false;
1700 info->addr.preind = true;
1701 info->shifter.operator_present = false;
1702 info->shifter.amount_present = false;
1703 return true;
1704 }
1705
1706 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1707 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1708 value. fields[0] specifies the base register field. */
1709 bool
1710 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1711 aarch64_opnd_info *info, aarch64_insn code,
1712 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1713 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1714 {
1715 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1716 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1717 }
1718
1719 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1720 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1721 value. fields[0] specifies the base register field. */
1722 bool
1723 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1724 aarch64_opnd_info *info, aarch64_insn code,
1725 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1726 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1727 {
1728 int offset = extract_field (FLD_SVE_imm6, code, 0);
1729 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1730 }
1731
1732 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1733 is SELF's operand-dependent value. fields[0] specifies the base
1734 register field and fields[1] specifies the offset register field. */
1735 bool
1736 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1737 aarch64_opnd_info *info, aarch64_insn code,
1738 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1739 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1740 {
1741 int index_regno;
1742
1743 index_regno = extract_field (self->fields[1], code, 0);
1744 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1745 return false;
1746
1747 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1748 info->addr.offset.regno = index_regno;
1749 info->addr.offset.is_reg = true;
1750 info->addr.writeback = false;
1751 info->addr.preind = true;
1752 info->shifter.kind = AARCH64_MOD_LSL;
1753 info->shifter.amount = get_operand_specific_data (self);
1754 info->shifter.operator_present = (info->shifter.amount != 0);
1755 info->shifter.amount_present = (info->shifter.amount != 0);
1756 return true;
1757 }
1758
1759 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1760 <shift> is SELF's operand-dependent value. fields[0] specifies the
1761 base register field, fields[1] specifies the offset register field and
1762 fields[2] is a single-bit field that selects SXTW over UXTW. */
1763 bool
1764 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1765 aarch64_opnd_info *info, aarch64_insn code,
1766 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1767 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1768 {
1769 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1770 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1771 info->addr.offset.is_reg = true;
1772 info->addr.writeback = false;
1773 info->addr.preind = true;
1774 if (extract_field (self->fields[2], code, 0))
1775 info->shifter.kind = AARCH64_MOD_SXTW;
1776 else
1777 info->shifter.kind = AARCH64_MOD_UXTW;
1778 info->shifter.amount = get_operand_specific_data (self);
1779 info->shifter.operator_present = true;
1780 info->shifter.amount_present = (info->shifter.amount != 0);
1781 return true;
1782 }
1783
1784 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1785 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1786 fields[0] specifies the base register field. */
1787 bool
1788 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1789 aarch64_opnd_info *info, aarch64_insn code,
1790 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1791 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1792 {
1793 int offset = extract_field (FLD_imm5, code, 0);
1794 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1795 }
1796
1797 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1798 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1799 number. fields[0] specifies the base register field and fields[1]
1800 specifies the offset register field. */
1801 static bool
1802 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1803 aarch64_insn code, enum aarch64_modifier_kind kind)
1804 {
1805 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1806 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1807 info->addr.offset.is_reg = true;
1808 info->addr.writeback = false;
1809 info->addr.preind = true;
1810 info->shifter.kind = kind;
1811 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1812 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1813 || info->shifter.amount != 0);
1814 info->shifter.amount_present = (info->shifter.amount != 0);
1815 return true;
1816 }
1817
/* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
			     aarch64_opnd_info *info, aarch64_insn code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Thin wrapper: the shared helper does the decoding, with LSL as the
     modifier kind.  */
  return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
}
1829
/* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
			      aarch64_opnd_info *info, aarch64_insn code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Thin wrapper: the shared helper does the decoding, with SXTW as the
     modifier kind.  */
  return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
}
1841
/* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
			      aarch64_opnd_info *info, aarch64_insn code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Thin wrapper: the shared helper does the decoding, with UXTW as the
     modifier kind.  */
  return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
}
1853
/* Finish decoding an SVE arithmetic immediate, given that INFO already
   has the raw field value and that the low 8 bits decode to VALUE.  */
static bool
decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
{
  info->shifter.kind = AARCH64_MOD_LSL;
  info->shifter.amount = 0;
  /* Bit 8 of the raw field selects the "shifted by 8" form.  */
  if (info->imm.value & 0x100)
    {
      if (value == 0)
	/* Decode 0x100 as #0, LSL #8.  */
	info->shifter.amount = 8;
      else
	/* Otherwise fold the shift into the immediate itself.  */
	value *= 256;
    }
  /* Only print the shifter when it does something.  */
  info->shifter.operator_present = (info->shifter.amount != 0);
  info->shifter.amount_present = (info->shifter.amount != 0);
  info->imm.value = value;
  return true;
}
1874
1875 /* Decode an SVE ADD/SUB immediate. */
1876 bool
1877 aarch64_ext_sve_aimm (const aarch64_operand *self,
1878 aarch64_opnd_info *info, const aarch64_insn code,
1879 const aarch64_inst *inst,
1880 aarch64_operand_error *errors)
1881 {
1882 return (aarch64_ext_imm (self, info, code, inst, errors)
1883 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1884 }
1885
1886 bool
1887 aarch64_ext_sve_aligned_reglist (const aarch64_operand *self,
1888 aarch64_opnd_info *info, aarch64_insn code,
1889 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1890 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1891 {
1892 unsigned int num_regs = get_operand_specific_data (self);
1893 unsigned int val = extract_field (self->fields[0], code, 0);
1894 info->reglist.first_regno = val * num_regs;
1895 info->reglist.num_regs = num_regs;
1896 info->reglist.stride = 1;
1897 return true;
1898 }
1899
1900 /* Decode an SVE CPY/DUP immediate. */
1901 bool
1902 aarch64_ext_sve_asimm (const aarch64_operand *self,
1903 aarch64_opnd_info *info, const aarch64_insn code,
1904 const aarch64_inst *inst,
1905 aarch64_operand_error *errors)
1906 {
1907 return (aarch64_ext_imm (self, info, code, inst, errors)
1908 && decode_sve_aimm (info, (int8_t) info->imm.value));
1909 }
1910
1911 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1912 The fields array specifies which field to use. */
1913 bool
1914 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1915 aarch64_opnd_info *info, aarch64_insn code,
1916 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1917 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1918 {
1919 if (extract_field (self->fields[0], code, 0))
1920 info->imm.value = 0x3f800000;
1921 else
1922 info->imm.value = 0x3f000000;
1923 info->imm.is_fp = true;
1924 return true;
1925 }
1926
1927 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1928 The fields array specifies which field to use. */
1929 bool
1930 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1931 aarch64_opnd_info *info, aarch64_insn code,
1932 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1933 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1934 {
1935 if (extract_field (self->fields[0], code, 0))
1936 info->imm.value = 0x40000000;
1937 else
1938 info->imm.value = 0x3f000000;
1939 info->imm.is_fp = true;
1940 return true;
1941 }
1942
1943 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1944 The fields array specifies which field to use. */
1945 bool
1946 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1947 aarch64_opnd_info *info, aarch64_insn code,
1948 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1949 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1950 {
1951 if (extract_field (self->fields[0], code, 0))
1952 info->imm.value = 0x3f800000;
1953 else
1954 info->imm.value = 0x0;
1955 info->imm.is_fp = true;
1956 return true;
1957 }
1958
1959 /* Decode SME instruction such as MOVZA ZA tile slice to vector. */
1960 bool
1961 aarch64_ext_sme_za_tile_to_vec (const aarch64_operand *self,
1962 aarch64_opnd_info *info, aarch64_insn code,
1963 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1964 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1965 {
1966 aarch64_insn Qsize; /* fields Q:S:size. */
1967 int fld_v = extract_field (self->fields[0], code, 0);
1968 int fld_rv = extract_field (self->fields[1], code, 0);
1969 int fld_zan_imm = extract_field (FLD_imm4_5, code, 0);
1970
1971 Qsize = extract_fields (inst->value, 0, 2, FLD_SME_size_22, FLD_SME_Q);
1972 switch (Qsize)
1973 {
1974 case 0x0:
1975 info->qualifier = AARCH64_OPND_QLF_S_B;
1976 info->indexed_za.regno = 0;
1977 info->indexed_za.index.imm = fld_zan_imm;
1978 break;
1979 case 0x2:
1980 info->qualifier = AARCH64_OPND_QLF_S_H;
1981 info->indexed_za.regno = fld_zan_imm >> 3;
1982 info->indexed_za.index.imm = fld_zan_imm & 0x07;
1983 break;
1984 case 0x4:
1985 info->qualifier = AARCH64_OPND_QLF_S_S;
1986 info->indexed_za.regno = fld_zan_imm >> 2;
1987 info->indexed_za.index.imm = fld_zan_imm & 0x03;
1988 break;
1989 case 0x6:
1990 info->qualifier = AARCH64_OPND_QLF_S_D;
1991 info->indexed_za.regno = fld_zan_imm >> 1;
1992 info->indexed_za.index.imm = fld_zan_imm & 0x01;
1993 break;
1994 case 0x7:
1995 info->qualifier = AARCH64_OPND_QLF_S_Q;
1996 info->indexed_za.regno = fld_zan_imm;
1997 break;
1998 default:
1999 return false;
2000 }
2001
2002 info->indexed_za.index.regno = fld_rv + 12;
2003 info->indexed_za.v = fld_v;
2004
2005 return true;
2006 }
2007
/* Decode ZA tile vector, vector indicator, vector selector, qualifier and
   immediate on numerous SME instruction fields such as MOVA.
   The size:Q fields determine how the encoded immediate is split between
   the tile number and the slice index; returns false for the reserved
   size/Q combinations.  */
bool
aarch64_ext_sme_za_hv_tiles (const aarch64_operand *self,
			     aarch64_opnd_info *info, aarch64_insn code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_size = extract_field (self->fields[0], code, 0);
  int fld_q = extract_field (self->fields[1], code, 0);
  int fld_v = extract_field (self->fields[2], code, 0);
  int fld_rv = extract_field (self->fields[3], code, 0);
  int fld_zan_imm = extract_field (self->fields[4], code, 0);

  /* Deduce qualifier encoded in size and Q fields.  */
  if (fld_size == 0)
    {
      /* Byte elements: one tile, the whole immediate indexes the slice.  */
      info->indexed_za.regno = 0;
      info->indexed_za.index.imm = fld_zan_imm;
    }
  else if (fld_size == 1)
    {
      /* Halfword: top immediate bit selects the tile.  */
      info->indexed_za.regno = fld_zan_imm >> 3;
      info->indexed_za.index.imm = fld_zan_imm & 0x07;
    }
  else if (fld_size == 2)
    {
      /* Word: two tile bits, two slice bits.  */
      info->indexed_za.regno = fld_zan_imm >> 2;
      info->indexed_za.index.imm = fld_zan_imm & 0x03;
    }
  else if (fld_size == 3 && fld_q == 0)
    {
      /* Doubleword: three tile bits, one slice bit.  */
      info->indexed_za.regno = fld_zan_imm >> 1;
      info->indexed_za.index.imm = fld_zan_imm & 0x01;
    }
  else if (fld_size == 3 && fld_q == 1)
    {
      /* Quadword: the immediate is the tile number; slice index is 0.  */
      info->indexed_za.regno = fld_zan_imm;
      info->indexed_za.index.imm = 0;
    }
  else
    return false;

  /* The vector select register is encoded as an offset from W12.  */
  info->indexed_za.index.regno = fld_rv + 12;
  info->indexed_za.v = fld_v;

  return true;
}
2056
2057 bool
2058 aarch64_ext_sme_za_hv_tiles_range (const aarch64_operand *self,
2059 aarch64_opnd_info *info, aarch64_insn code,
2060 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2061 aarch64_operand_error *errors
2062 ATTRIBUTE_UNUSED)
2063 {
2064 int ebytes = aarch64_get_qualifier_esize (info->qualifier);
2065 int range_size = get_opcode_dependent_value (inst->opcode);
2066 int fld_v = extract_field (self->fields[0], code, 0);
2067 int fld_rv = extract_field (self->fields[1], code, 0);
2068 int fld_zan_imm = extract_field (self->fields[2], code, 0);
2069 int max_value = 16 / range_size / ebytes;
2070
2071 if (max_value == 0)
2072 max_value = 1;
2073
2074 int regno = fld_zan_imm / max_value;
2075 if (regno >= ebytes)
2076 return false;
2077
2078 info->indexed_za.regno = regno;
2079 info->indexed_za.index.imm = (fld_zan_imm % max_value) * range_size;
2080 info->indexed_za.index.countm1 = range_size - 1;
2081 info->indexed_za.index.regno = fld_rv + 12;
2082 info->indexed_za.v = fld_v;
2083
2084 return true;
2085 }
2086
/* Decode, for the SME ZERO instruction, the list of up to eight 64-bit
   element tile names separated by commas, encoded in the "imm8" field.

   For programmer convenience an assembler must also accept the names of
   32-bit, 16-bit and 8-bit element tiles, which are converted into the
   corresponding set of 64-bit element tiles.  The decoder only needs the
   raw mask; the printer expands it back into tile names.  */
bool
aarch64_ext_sme_za_list (const aarch64_operand *self,
			 aarch64_opnd_info *info, aarch64_insn code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int mask = extract_field (self->fields[0], code, 0);
  info->imm.value = mask;
  return true;
}
2104
/* Decode ZA array vector select register (Rv field), optional vector and
   memory offset (imm4_11 field).  The operand-specific data gives the
   offset scale (number of vectors per step); the opcode-dependent value
   gives the group size.  */
bool
aarch64_ext_sme_za_array (const aarch64_operand *self,
			  aarch64_opnd_info *info, aarch64_insn code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int regno = extract_field (self->fields[0], code, 0);
  /* The select register base differs per operand type: W12-W15 for the
     4-bit-offset form, W8-W11 otherwise.  */
  if (info->type == AARCH64_OPND_SME_ZA_array_off4)
    regno += 12;
  else
    regno += 8;
  int imm = extract_field (self->fields[1], code, 0);
  int num_offsets = get_operand_specific_data (self);
  /* Zero operand-specific data means an unscaled (single-vector) offset.  */
  if (num_offsets == 0)
    num_offsets = 1;
  info->indexed_za.index.regno = regno;
  info->indexed_za.index.imm = imm * num_offsets;
  info->indexed_za.index.countm1 = num_offsets - 1;
  info->indexed_za.group_size = get_opcode_dependent_value (inst->opcode);
  return true;
}
2129
/* Decode two ZA tile slices (V, Rv, off3 | ZAn, off2 | ZAn, o1 | ZAn)
   fields.  The qualifier decides which fields carry the tile number and
   which carry the slice offset; the offset is scaled by the slice count
   (2 for this variant).  */
bool
aarch64_ext_sme_za_vrs1 (const aarch64_operand *self,
			 aarch64_opnd_info *info, aarch64_insn code,
			 const aarch64_inst *inst,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int v = extract_field (self->fields[0], code, 0);
  int regno = 12 + extract_field (self->fields[1], code, 0);
  int imm, za_reg, num_offset = 2;

  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Byte: no tile bits, only a slice offset.  */
      imm = extract_field (self->fields[2], code, 0);
      info->indexed_za.index.imm = imm * num_offset;
      break;
    case AARCH64_OPND_QLF_S_H:
    case AARCH64_OPND_QLF_S_S:
      /* Halfword/word: separate tile and offset fields.  */
      za_reg = extract_field (self->fields[2], code, 0);
      imm = extract_field (self->fields[3], code, 0);
      info->indexed_za.index.imm = imm * num_offset;
      info->indexed_za.regno = za_reg;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Doubleword: only a tile field; no encoded offset.  */
      za_reg = extract_field (self->fields[2], code, 0);
      info->indexed_za.regno = za_reg;
      break;
    default:
      return false;
    }

  /* The vector select register is encoded as an offset from W12.  */
  info->indexed_za.index.regno = regno;
  info->indexed_za.index.countm1 = num_offset - 1;
  info->indexed_za.v = v;
  info->indexed_za.group_size = get_opcode_dependent_value (inst->opcode);
  return true;
}
2168
/* Decode four ZA tile slices (V, Rv, off3 | ZAn, off2 | ZAn, o1 | ZAn)
   fields.  As aarch64_ext_sme_za_vrs1, but with four slices per group,
   so the per-qualifier field split shifts by one element size.  */
bool
aarch64_ext_sme_za_vrs2 (const aarch64_operand *self,
			 aarch64_opnd_info *info, aarch64_insn code,
			 const aarch64_inst *inst,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int v = extract_field (self->fields[0], code, 0);
  int regno = 12 + extract_field (self->fields[1], code, 0);
  int imm, za_reg, num_offset =4;

  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Byte: no tile bits, only a slice offset.  */
      imm = extract_field (self->fields[2], code, 0);
      info->indexed_za.index.imm = imm * num_offset;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Halfword: separate tile and offset fields.  */
      za_reg = extract_field (self->fields[2], code, 0);
      imm = extract_field (self->fields[3], code, 0);
      info->indexed_za.index.imm = imm * num_offset;
      info->indexed_za.regno = za_reg;
      break;
    case AARCH64_OPND_QLF_S_S:
    case AARCH64_OPND_QLF_S_D:
      /* Word/doubleword: only a tile field; no encoded offset.  */
      za_reg = extract_field (self->fields[2], code, 0);
      info->indexed_za.regno = za_reg;
      break;
    default:
      return false;
    }

  /* The vector select register is encoded as an offset from W12.  */
  info->indexed_za.index.regno = regno;
  info->indexed_za.index.countm1 = num_offset - 1;
  info->indexed_za.v = v;
  info->indexed_za.group_size = get_opcode_dependent_value (inst->opcode);
  return true;
}
2207
/* Decode an SME base-plus-unsigned-immediate address of the form
   [<Xn|SP>{, #<imm>, MUL VL}].  */
bool
aarch64_ext_sme_addr_ri_u4xvl (const aarch64_operand *self,
			       aarch64_opnd_info *info, aarch64_insn code,
			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int regno = extract_field (self->fields[0], code, 0);
  int imm = extract_field (self->fields[1], code, 0);
  info->addr.base_regno = regno;
  info->addr.offset.imm = imm;
  /* MUL VL operator is always present for this operand; it is only
     printed when the immediate is nonzero.  */
  info->shifter.kind = AARCH64_MOD_MUL_VL;
  info->shifter.operator_present = (imm != 0);
  return true;
}
2223
/* Decode the {SM|ZA} field for SMSTART and SMSTOP instructions.
   CRm[3:1] selects the state being switched: 1 = SM, 2 = ZA; other
   values are invalid.  The selected letter is stashed in reg.regno for
   the printer.  */
bool
aarch64_ext_sme_sm_za (const aarch64_operand *self,
		       aarch64_opnd_info *info, aarch64_insn code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* PSTATE field number for SVCR.  */
  info->pstatefield = 0x1b;
  aarch64_insn fld_crm = extract_field (self->fields[0], code, 0);
  fld_crm >>= 1;    /* CRm[3:1].  */

  if (fld_crm == 0x1)
    info->reg.regno = 's';
  else if (fld_crm == 0x2)
    info->reg.regno = 'z';
  else
    return false;

  return true;
}
2244
/* Decode an SME predicate-as-counter register with an index,
   e.g. PNn[<Wv>, #<imm>].  The element size is encoded unarily in
   tszh:tszl (position of the lowest set bit); the index is assembled
   from i1 and the bits above that lowest set bit.  An all-zero
   tszh:tszl is a reserved encoding.  */
bool
aarch64_ext_sme_pred_reg_with_index (const aarch64_operand *self,
				     aarch64_opnd_info *info, aarch64_insn code,
				     const aarch64_inst *inst ATTRIBUTE_UNUSED,
				     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn fld_rm = extract_field (self->fields[0], code, 0);
  aarch64_insn fld_pn = extract_field (self->fields[1], code, 0);
  aarch64_insn fld_i1 = extract_field (self->fields[2], code, 0);
  aarch64_insn fld_tszh = extract_field (self->fields[3], code, 0);
  aarch64_insn fld_tszl = extract_field (self->fields[4], code, 0);
  int imm;

  info->indexed_za.regno = fld_pn;
  /* The vector select register is encoded as an offset from W12.  */
  info->indexed_za.index.regno = fld_rm + 12;

  /* The lower the set bit in tszl/tszh, the more index bits there are.  */
  if (fld_tszl & 0x1)
    imm = (fld_i1 << 3) | (fld_tszh << 2) | (fld_tszl >> 1);
  else if (fld_tszl & 0x2)
    imm = (fld_i1 << 2) | (fld_tszh << 1) | (fld_tszl >> 2);
  else if (fld_tszl & 0x4)
    imm = (fld_i1 << 1) | fld_tszh;
  else if (fld_tszh)
    imm = fld_i1;
  else
    return false;

  info->indexed_za.index.imm = imm;
  return true;
}
2275
2276 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
2277 array specifies which field to use for Zn. MM is encoded in the
2278 concatenation of imm5 and SVE_tszh, with imm5 being the less
2279 significant part. */
2280 bool
2281 aarch64_ext_sve_index (const aarch64_operand *self,
2282 aarch64_opnd_info *info, aarch64_insn code,
2283 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2284 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2285 {
2286 int val;
2287
2288 info->reglane.regno = extract_field (self->fields[0], code, 0);
2289 val = extract_all_fields_after (self, 1, code);
2290 if ((val & 31) == 0)
2291 return 0;
2292 while ((val & 1) == 0)
2293 val /= 2;
2294 info->reglane.index = val / 2;
2295 return true;
2296 }
2297
/* Decode a logical immediate for the MOV alias of SVE DUPM.  The alias
   is only valid when the immediate is one that DUPM would print as MOV,
   hence the extra aarch64_sve_dupm_mov_immediate_p check.  */
bool
aarch64_ext_sve_limm_mov (const aarch64_operand *self,
			  aarch64_opnd_info *info, const aarch64_insn code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors)
{
  int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
  return (aarch64_ext_limm (self, info, code, inst, errors)
	  && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
}
2309
/* Decode Zn[MM], where Zn occupies the least-significant part of the field
   and where MM occupies the most-significant part.  The operand-dependent
   value specifies the number of bits in Zn.  */
bool
aarch64_ext_sve_quad_index (const aarch64_operand *self,
			    aarch64_opnd_info *info, aarch64_insn code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int reg_bits = get_operand_specific_data (self);
  unsigned int val = extract_all_fields (self, code);
  /* Low reg_bits bits are the register number; the rest is the index.  */
  info->reglane.regno = val & ((1 << reg_bits) - 1);
  info->reglane.index = val >> reg_bits;
  return true;
}
2325
2326 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
2327 to use for Zn. The opcode-dependent value specifies the number
2328 of registers in the list. */
2329 bool
2330 aarch64_ext_sve_reglist (const aarch64_operand *self,
2331 aarch64_opnd_info *info, aarch64_insn code,
2332 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2333 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2334 {
2335 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
2336 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
2337 info->reglist.stride = 1;
2338 return true;
2339 }
2340
/* Decode {Zn.<T> , Zm.<T>}.  The fields array specifies which field
   to use for Zn.  The operand-specific data specifies the number
   of registers in the list.  */
bool
aarch64_ext_sve_reglist_zt (const aarch64_operand *self,
			    aarch64_opnd_info *info, aarch64_insn code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  info->reglist.first_regno = extract_field (self->fields[0], code, 0);
  /* Here the list length comes from the operand, not the opcode.  */
  info->reglist.num_regs = get_operand_specific_data (self);
  info->reglist.stride = 1;
  return true;
}
2355
/* Decode a strided register list.  The first field holds the top bit
   (0 or 16) and the second field holds the lower bits.  The stride is
   16 divided by the list length.  */
bool
aarch64_ext_sve_strided_reglist (const aarch64_operand *self,
				 aarch64_opnd_info *info, aarch64_insn code,
				 const aarch64_inst *inst ATTRIBUTE_UNUSED,
				 aarch64_operand_error *errors
				   ATTRIBUTE_UNUSED)
{
  unsigned int upper = extract_field (self->fields[0], code, 0);
  unsigned int lower = extract_field (self->fields[1], code, 0);
  info->reglist.first_regno = upper * 16 + lower;
  info->reglist.num_regs = get_operand_specific_data (self);
  info->reglist.stride = 16 / info->reglist.num_regs;
  return true;
}
2373
/* Decode <pattern>{, MUL #<amount>}.  The fields array specifies which
   fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
   field.  */
bool
aarch64_ext_sve_scale (const aarch64_operand *self,
		       aarch64_opnd_info *info, aarch64_insn code,
		       const aarch64_inst *inst, aarch64_operand_error *errors)
{
  int val;

  if (!aarch64_ext_imm (self, info, code, inst, errors))
    return false;
  val = extract_field (FLD_SVE_imm4, code, 0);
  info->shifter.kind = AARCH64_MOD_MUL;
  info->shifter.amount = val + 1;
  /* "MUL #1" is the implicit default, so suppress it in the output.  */
  info->shifter.operator_present = (val != 0);
  info->shifter.amount_present = (val != 0);
  return true;
}
2393
/* Return the top set bit in VALUE, which is expected to be relatively
   small; zero is returned unchanged.  */
static uint64_t
get_top_bit (uint64_t value)
{
  /* Repeatedly clearing the lowest set bit (value &= value - 1) leaves
     only the most significant set bit.  */
  while (value & (value - 1))
    value &= value - 1;
  return value;
}
2403
/* Decode an SVE shift-left immediate.  The raw immediate encodes the
   element size as its top set bit; removing that bit leaves the shift
   amount.  A raw value of zero is a reserved encoding.  */
bool
aarch64_ext_sve_shlimm (const aarch64_operand *self,
			aarch64_opnd_info *info, const aarch64_insn code,
			const aarch64_inst *inst, aarch64_operand_error *errors)
{
  if (!aarch64_ext_imm (self, info, code, inst, errors)
      || info->imm.value == 0)
    return false;

  info->imm.value -= get_top_bit (info->imm.value);
  return true;
}
2417
/* Decode an SVE shift-right immediate.  As for shift-left, the top set
   bit marks the element size, but the shift amount counts downwards:
   amount = 2 * esize - raw value.  A raw value of zero is reserved.  */
bool
aarch64_ext_sve_shrimm (const aarch64_operand *self,
			aarch64_opnd_info *info, const aarch64_insn code,
			const aarch64_inst *inst, aarch64_operand_error *errors)
{
  if (!aarch64_ext_imm (self, info, code, inst, errors)
      || info->imm.value == 0)
    return false;

  info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
  return true;
}
2431
2432 /* Decode X0-X30. Register 31 is unallocated. */
2433 bool
2434 aarch64_ext_x0_to_x30 (const aarch64_operand *self, aarch64_opnd_info *info,
2435 const aarch64_insn code,
2436 const aarch64_inst *inst ATTRIBUTE_UNUSED,
2437 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
2438 {
2439 info->reg.regno = extract_field (self->fields[0], code, 0);
2440 return info->reg.regno <= 30;
2441 }
2442
/* Decode an indexed register, with the first field being the register
   number and the remaining fields being the index.  The operand-specific
   data gives a bias to add to the encoded register number.  */
bool
aarch64_ext_simple_index (const aarch64_operand *self, aarch64_opnd_info *info,
			  const aarch64_insn code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int bias = get_operand_specific_data (self);
  info->reglane.regno = extract_field (self->fields[0], code, 0) + bias;
  info->reglane.index = extract_all_fields_after (self, 1, code);
  return true;
}
2456
/* Decode a plain shift-right immediate, when there is only a single
   element size.  The shift amount is the field's maximum value plus one
   minus the raw encoding, so raw 0 means the widest shift.  */
bool
aarch64_ext_plain_shrimm (const aarch64_operand *self, aarch64_opnd_info *info,
			  const aarch64_insn code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int base = 1 << get_operand_field_width (self, 0);
  info->imm.value = base - extract_field (self->fields[0], code, 0);
  return true;
}
2469 \f
2470 /* Bitfields that are commonly used to encode certain operands' information
2471 may be partially used as part of the base opcode in some instructions.
2472 For example, the bit 1 of the field 'size' in
2473 FCVTXN <Vb><d>, <Va><n>
2474 is actually part of the base opcode, while only size<0> is available
2475 for encoding the register type. Another example is the AdvSIMD
2476 instruction ORR (register), in which the field 'size' is also used for
2477 the base opcode, leaving only the field 'Q' available to encode the
2478 vector register arrangement specifier '8B' or '16B'.
2479
2480 This function tries to deduce the qualifier from the value of partially
2481 constrained field(s). Given the VALUE of such a field or fields, the
2482 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
2483 operand encoding), the function returns the matching qualifier or
2484 AARCH64_OPND_QLF_NIL if nothing matches.
2485
2486 N.B. CANDIDATES is a group of possible qualifiers that are valid for
2487 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
2488 may end with AARCH64_OPND_QLF_NIL. */
2489
2490 static enum aarch64_opnd_qualifier
2491 get_qualifier_from_partial_encoding (aarch64_insn value,
2492 const enum aarch64_opnd_qualifier* \
2493 candidates,
2494 aarch64_insn mask)
2495 {
2496 int i;
2497 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
2498 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2499 {
2500 aarch64_insn standard_value;
2501 if (candidates[i] == AARCH64_OPND_QLF_NIL)
2502 break;
2503 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
2504 if ((standard_value & mask) == (value & mask))
2505 return candidates[i];
2506 }
2507 return AARCH64_OPND_QLF_NIL;
2508 }
2509
2510 /* Given a list of qualifier sequences, return all possible valid qualifiers
2511 for operand IDX in QUALIFIERS.
2512 Assume QUALIFIERS is an array whose length is large enough. */
2513
2514 static void
2515 get_operand_possible_qualifiers (int idx,
2516 const aarch64_opnd_qualifier_seq_t *list,
2517 enum aarch64_opnd_qualifier *qualifiers)
2518 {
2519 int i;
2520 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2521 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
2522 break;
2523 }
2524
2525 /* Decode the size Q field for e.g. SHADD.
2526 We tag one operand with the qualifer according to the code;
2527 whether the qualifier is valid for this opcode or not, it is the
2528 duty of the semantic checking. */
2529
2530 static int
2531 decode_sizeq (aarch64_inst *inst)
2532 {
2533 int idx;
2534 enum aarch64_opnd_qualifier qualifier;
2535 aarch64_insn code;
2536 aarch64_insn value, mask;
2537 enum aarch64_field_kind fld_sz;
2538 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2539
2540 if (inst->opcode->iclass == asisdlse
2541 || inst->opcode->iclass == asisdlsep
2542 || inst->opcode->iclass == asisdlso
2543 || inst->opcode->iclass == asisdlsop)
2544 fld_sz = FLD_vldst_size;
2545 else
2546 fld_sz = FLD_size;
2547
2548 code = inst->value;
2549 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
2550 /* Obtain the info that which bits of fields Q and size are actually
2551 available for operand encoding. Opcodes like FMAXNM and FMLA have
2552 size[1] unavailable. */
2553 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
2554
2555 /* The index of the operand we are going to tag a qualifier and the qualifer
2556 itself are reasoned from the value of the size and Q fields and the
2557 possible valid qualifier lists. */
2558 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
2559 DEBUG_TRACE ("key idx: %d", idx);
2560
2561 /* For most related instruciton, size:Q are fully available for operand
2562 encoding. */
2563 if (mask == 0x7)
2564 {
2565 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
2566 if (inst->operands[idx].qualifier == AARCH64_OPND_QLF_ERR)
2567 return 0;
2568 return 1;
2569 }
2570
2571 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2572 candidates);
2573 #ifdef DEBUG_AARCH64
2574 if (debug_dump)
2575 {
2576 int i;
2577 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
2578 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
2579 DEBUG_TRACE ("qualifier %d: %s", i,
2580 aarch64_get_qualifier_name(candidates[i]));
2581 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
2582 }
2583 #endif /* DEBUG_AARCH64 */
2584
2585 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
2586
2587 if (qualifier == AARCH64_OPND_QLF_NIL)
2588 return 0;
2589
2590 inst->operands[idx].qualifier = qualifier;
2591 return 1;
2592 }
2593
/* Decode size[0]:Q, i.e. bit 22 and bit 30, for
   e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.
   Only the wider (<Ta>) operand is tagged here; which operand that is
   depends on whether the conversion narrows (FCVTN) or widens (FCVTL).  */

static int
decode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_field field = {0, 0};
  aarch64_insn value;
  enum aarch64_opnd_qualifier qualifier;

  /* size[0] selects between single (4S) and double (2D) source/dest.  */
  gen_sub_field (FLD_size, 0, 1, &field);
  value = extract_field_2 (&field, inst->value, 0);
  qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
    : AARCH64_OPND_QLF_V_2D;
  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      inst->operands[1].qualifier = qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      inst->operands[0].qualifier = qualifier;
      break;
    default:
      return 0;
    }

  return 1;
}
2626
/* Decode size[0], i.e. bit 22, for
   e.g. FCVTXN <Vb><d>, <Va><n>.  Only the encoding with size[0] set is
   valid, and it fixes the destination qualifier to single precision.  */

static int
decode_asisd_fcvtxn (aarch64_inst *inst)
{
  aarch64_field field = {0, 0};
  gen_sub_field (FLD_size, 0, 1, &field);
  if (!extract_field_2 (&field, inst->value, 0))
    return 0;
  inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
  return 1;
}
2640
/* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  The two-bit field at
   bit 15 gives the destination size; value 2 is reserved.  */
static int
decode_fcvt (aarch64_inst *inst)
{
  enum aarch64_opnd_qualifier qualifier;
  aarch64_insn value;
  const aarch64_field field = {15, 2};

  /* opc dstsize */
  value = extract_field_2 (&field, inst->value, 0);
  switch (value)
    {
    case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
    case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
    case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
    default: return 0;
    }
  inst->operands[0].qualifier = qualifier;

  return 1;
}
2662
/* Do miscellaneous decodings that are not common enough to be driven by
   flags.  Most cases validate the register-tying constraints that make
   an alias (e.g. the MOV forms of SVE instructions) applicable.
   Returns 1 if the instruction decodes (and the alias constraints hold),
   0 otherwise.  */

static int
do_misc_decoding (aarch64_inst *inst)
{
  unsigned int value;
  switch (inst->opcode->op)
    {
    case OP_FCVT:
      return decode_fcvt (inst);

    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      return decode_asimd_fcvt (inst);

    case OP_FCVTXN_S:
      return decode_asisd_fcvtxn (inst);

    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* The MOV alias requires Pn == Pm == Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
	      && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));

    case OP_MOV_Z_P_Z:
      /* The predicated MOV alias requires Zd == Zm.  */
      return (extract_field (FLD_SVE_Zd, inst->value, 0)
	      == extract_field (FLD_SVE_Zm_16, inst->value, 0));

    case OP_MOV_Z_V:
      /* Index must be zero: exactly one bit set in tszh:imm5 and it is
	 the unary element-size marker (a power of two <= 16).  */
      value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
      return value > 0 && value <= 16 && value == (value & -value);

    case OP_MOV_Z_Z:
      /* The MOV alias requires Zn == Zm.  */
      return (extract_field (FLD_SVE_Zn, inst->value, 0)
	      == extract_field (FLD_SVE_Zm_16, inst->value, 0));

    case OP_MOV_Z_Zi:
      /* Index must be nonzero: bits above the element-size marker.  */
      value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
      return value > 0 && value != (value & -value);

    case OP_MOVM_P_P_P:
      return (extract_field (FLD_SVE_Pd, inst->value, 0)
	      == extract_field (FLD_SVE_Pm, inst->value, 0));

    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      return (extract_field (FLD_SVE_Pn, inst->value, 0)
	      == extract_field (FLD_SVE_Pm, inst->value, 0));

    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* The NOT alias requires Pm == Pg.  */
      return (extract_field (FLD_SVE_Pm, inst->value, 0)
	      == extract_field (FLD_SVE_Pg4_10, inst->value, 0));

    default:
      return 0;
    }
}
2726
2727 /* Opcodes that have fields shared by multiple operands are usually flagged
2728 with flags. In this function, we detect such flags, decode the related
2729 field(s) and store the information in one of the related operands. The
2730 'one' operand is not any operand but one of the operands that can
   accommodate all the information that has been decoded.  The
2732
2733 static int
2734 do_special_decoding (aarch64_inst *inst)
2735 {
2736 int idx;
2737 aarch64_insn value;
2738 /* Condition for truly conditional executed instructions, e.g. b.cond. */
2739 if (inst->opcode->flags & F_COND)
2740 {
2741 value = extract_field (FLD_cond2, inst->value, 0);
2742 inst->cond = get_cond_from_value (value);
2743 }
2744 /* 'sf' field. */
2745 if (inst->opcode->flags & F_SF)
2746 {
2747 idx = select_operand_for_sf_field_coding (inst->opcode);
2748 value = extract_field (FLD_sf, inst->value, 0);
2749 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2750 if (inst->operands[idx].qualifier == AARCH64_OPND_QLF_ERR)
2751 return 0;
2752 if ((inst->opcode->flags & F_N)
2753 && extract_field (FLD_N, inst->value, 0) != value)
2754 return 0;
2755 }
2756 /* 'sf' field. */
2757 if (inst->opcode->flags & F_LSE_SZ)
2758 {
2759 idx = select_operand_for_sf_field_coding (inst->opcode);
2760 value = extract_field (FLD_lse_sz, inst->value, 0);
2761 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2762 if (inst->operands[idx].qualifier == AARCH64_OPND_QLF_ERR)
2763 return 0;
2764 }
2765 /* rcpc3 'size' field. */
2766 if (inst->opcode->flags & F_RCPC3_SIZE)
2767 {
2768 value = extract_field (FLD_rcpc3_size, inst->value, 0);
2769 for (int i = 0;
2770 aarch64_operands[inst->operands[i].type].op_class != AARCH64_OPND_CLASS_ADDRESS;
2771 i++)
2772 {
2773 if (aarch64_operands[inst->operands[i].type].op_class
2774 == AARCH64_OPND_CLASS_INT_REG)
2775 {
2776 inst->operands[i].qualifier = get_greg_qualifier_from_value (value & 1);
2777 if (inst->operands[i].qualifier == AARCH64_OPND_QLF_ERR)
2778 return 0;
2779 }
2780 else if (aarch64_operands[inst->operands[i].type].op_class
2781 == AARCH64_OPND_CLASS_FP_REG)
2782 {
2783 value += (extract_field (FLD_opc1, inst->value, 0) << 2);
2784 inst->operands[i].qualifier = get_sreg_qualifier_from_value (value);
2785 if (inst->operands[i].qualifier == AARCH64_OPND_QLF_ERR)
2786 return 0;
2787 }
2788 }
2789 }
2790
2791 /* size:Q fields. */
2792 if (inst->opcode->flags & F_SIZEQ)
2793 return decode_sizeq (inst);
2794
2795 if (inst->opcode->flags & F_FPTYPE)
2796 {
2797 idx = select_operand_for_fptype_field_coding (inst->opcode);
2798 value = extract_field (FLD_type, inst->value, 0);
2799 switch (value)
2800 {
2801 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2802 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2803 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2804 default: return 0;
2805 }
2806 }
2807
2808 if (inst->opcode->flags & F_SSIZE)
2809 {
2810 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
2811 of the base opcode. */
2812 aarch64_insn mask;
2813 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2814 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2815 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2816 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2817 /* For most related instruciton, the 'size' field is fully available for
2818 operand encoding. */
2819 if (mask == 0x3)
2820 {
2821 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2822 if (inst->operands[idx].qualifier == AARCH64_OPND_QLF_ERR)
2823 return 0;
2824 }
2825 else
2826 {
2827 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2828 candidates);
2829 inst->operands[idx].qualifier
2830 = get_qualifier_from_partial_encoding (value, candidates, mask);
2831 }
2832 }
2833
2834 if (inst->opcode->flags & F_T)
2835 {
2836 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2837 int num = 0;
2838 unsigned val, Q;
2839 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2840 == AARCH64_OPND_CLASS_SIMD_REG);
2841 /* imm5<3:0> q <t>
2842 0000 x reserved
2843 xxx1 0 8b
2844 xxx1 1 16b
2845 xx10 0 4h
2846 xx10 1 8h
2847 x100 0 2s
2848 x100 1 4s
2849 1000 0 reserved
2850 1000 1 2d */
2851 val = extract_field (FLD_imm5, inst->value, 0);
2852 while ((val & 0x1) == 0 && ++num <= 3)
2853 val >>= 1;
2854 if (num > 3)
2855 return 0;
2856 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2857 inst->operands[0].qualifier =
2858 get_vreg_qualifier_from_value ((num << 1) | Q);
2859 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_ERR)
2860 return 0;
2861
2862 }
2863
2864 if ((inst->opcode->flags & F_OPD_SIZE) && inst->opcode->iclass == sve2_urqvs)
2865 {
2866 unsigned size;
2867 size = (unsigned) extract_field (FLD_size, inst->value,
2868 inst->opcode->mask);
2869 inst->operands[0].qualifier
2870 = get_vreg_qualifier_from_value (1 + (size << 1));
2871 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_ERR)
2872 return 0;
2873 inst->operands[2].qualifier = get_sreg_qualifier_from_value (size);
2874 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_ERR)
2875 return 0;
2876 }
2877
2878 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2879 {
2880 /* Use Rt to encode in the case of e.g.
2881 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2882 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2883 if (idx == -1)
2884 {
2885 /* Otherwise use the result operand, which has to be a integer
2886 register. */
2887 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2888 == AARCH64_OPND_CLASS_INT_REG);
2889 idx = 0;
2890 }
2891 assert (idx == 0 || idx == 1);
2892 value = extract_field (FLD_Q, inst->value, 0);
2893 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2894 if (inst->operands[idx].qualifier == AARCH64_OPND_QLF_ERR)
2895 return 0;
2896 }
2897
2898 if (inst->opcode->flags & F_LDS_SIZE)
2899 {
2900 aarch64_field field = {0, 0};
2901 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2902 == AARCH64_OPND_CLASS_INT_REG);
2903 gen_sub_field (FLD_opc, 0, 1, &field);
2904 value = extract_field_2 (&field, inst->value, 0);
2905 inst->operands[0].qualifier
2906 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2907 }
2908
2909 /* Miscellaneous decoding; done as the last step. */
2910 if (inst->opcode->flags & F_MISC)
2911 return do_misc_decoding (inst);
2912
2913 return 1;
2914 }
2915
2916 /* Converters converting a real opcode instruction to its alias form. */
2917
2918 /* ROR <Wd>, <Ws>, #<shift>
2919 is equivalent to:
2920 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2921 static int
2922 convert_extr_to_ror (aarch64_inst *inst)
2923 {
2924 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2925 {
2926 copy_operand_info (inst, 2, 3);
2927 inst->operands[3].type = AARCH64_OPND_NIL;
2928 return 1;
2929 }
2930 return 0;
2931 }
2932
2933 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2934 is equivalent to:
2935 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2936 static int
2937 convert_shll_to_xtl (aarch64_inst *inst)
2938 {
2939 if (inst->operands[2].imm.value == 0)
2940 {
2941 inst->operands[2].type = AARCH64_OPND_NIL;
2942 return 1;
2943 }
2944 return 0;
2945 }
2946
2947 /* Convert
2948 UBFM <Xd>, <Xn>, #<shift>, #63.
2949 to
2950 LSR <Xd>, <Xn>, #<shift>. */
2951 static int
2952 convert_bfm_to_sr (aarch64_inst *inst)
2953 {
2954 int64_t imms, val;
2955
2956 imms = inst->operands[3].imm.value;
2957 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2958 if (imms == val)
2959 {
2960 inst->operands[3].type = AARCH64_OPND_NIL;
2961 return 1;
2962 }
2963
2964 return 0;
2965 }
2966
2967 /* Convert MOV to ORR. */
2968 static int
2969 convert_orr_to_mov (aarch64_inst *inst)
2970 {
2971 /* MOV <Vd>.<T>, <Vn>.<T>
2972 is equivalent to:
2973 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2974 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2975 {
2976 inst->operands[2].type = AARCH64_OPND_NIL;
2977 return 1;
2978 }
2979 return 0;
2980 }
2981
2982 /* When <imms> >= <immr>, the instruction written:
2983 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2984 is equivalent to:
2985 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2986
2987 static int
2988 convert_bfm_to_bfx (aarch64_inst *inst)
2989 {
2990 int64_t immr, imms;
2991
2992 immr = inst->operands[2].imm.value;
2993 imms = inst->operands[3].imm.value;
2994 if (imms >= immr)
2995 {
2996 int64_t lsb = immr;
2997 inst->operands[2].imm.value = lsb;
2998 inst->operands[3].imm.value = imms + 1 - lsb;
2999 /* The two opcodes have different qualifiers for
3000 the immediate operands; reset to help the checking. */
3001 reset_operand_qualifier (inst, 2);
3002 reset_operand_qualifier (inst, 3);
3003 return 1;
3004 }
3005
3006 return 0;
3007 }
3008
3009 /* When <imms> < <immr>, the instruction written:
3010 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
3011 is equivalent to:
3012 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
3013
3014 static int
3015 convert_bfm_to_bfi (aarch64_inst *inst)
3016 {
3017 int64_t immr, imms, val;
3018
3019 immr = inst->operands[2].imm.value;
3020 imms = inst->operands[3].imm.value;
3021 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
3022 if (imms < immr)
3023 {
3024 inst->operands[2].imm.value = (val - immr) & (val - 1);
3025 inst->operands[3].imm.value = imms + 1;
3026 /* The two opcodes have different qualifiers for
3027 the immediate operands; reset to help the checking. */
3028 reset_operand_qualifier (inst, 2);
3029 reset_operand_qualifier (inst, 3);
3030 return 1;
3031 }
3032
3033 return 0;
3034 }
3035
3036 /* The instruction written:
3037 BFC <Xd>, #<lsb>, #<width>
3038 is equivalent to:
3039 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
3040
3041 static int
3042 convert_bfm_to_bfc (aarch64_inst *inst)
3043 {
3044 int64_t immr, imms, val;
3045
3046 /* Should have been assured by the base opcode value. */
3047 assert (inst->operands[1].reg.regno == 0x1f);
3048
3049 immr = inst->operands[2].imm.value;
3050 imms = inst->operands[3].imm.value;
3051 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
3052 if (imms < immr)
3053 {
3054 /* Drop XZR from the second operand. */
3055 copy_operand_info (inst, 1, 2);
3056 copy_operand_info (inst, 2, 3);
3057 inst->operands[3].type = AARCH64_OPND_NIL;
3058
3059 /* Recalculate the immediates. */
3060 inst->operands[1].imm.value = (val - immr) & (val - 1);
3061 inst->operands[2].imm.value = imms + 1;
3062
3063 /* The two opcodes have different qualifiers for the operands; reset to
3064 help the checking. */
3065 reset_operand_qualifier (inst, 1);
3066 reset_operand_qualifier (inst, 2);
3067 reset_operand_qualifier (inst, 3);
3068
3069 return 1;
3070 }
3071
3072 return 0;
3073 }
3074
3075 /* The instruction written:
3076 LSL <Xd>, <Xn>, #<shift>
3077 is equivalent to:
3078 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
3079
3080 static int
3081 convert_ubfm_to_lsl (aarch64_inst *inst)
3082 {
3083 int64_t immr = inst->operands[2].imm.value;
3084 int64_t imms = inst->operands[3].imm.value;
3085 int64_t val
3086 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
3087
3088 if ((immr == 0 && imms == val) || immr == imms + 1)
3089 {
3090 inst->operands[3].type = AARCH64_OPND_NIL;
3091 inst->operands[2].imm.value = val - imms;
3092 return 1;
3093 }
3094
3095 return 0;
3096 }
3097
3098 /* CINC <Wd>, <Wn>, <cond>
3099 is equivalent to:
3100 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
3101 where <cond> is not AL or NV. */
3102
3103 static int
3104 convert_from_csel (aarch64_inst *inst)
3105 {
3106 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
3107 && (inst->operands[3].cond->value & 0xe) != 0xe)
3108 {
3109 copy_operand_info (inst, 2, 3);
3110 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
3111 inst->operands[3].type = AARCH64_OPND_NIL;
3112 return 1;
3113 }
3114 return 0;
3115 }
3116
3117 /* CSET <Wd>, <cond>
3118 is equivalent to:
3119 CSINC <Wd>, WZR, WZR, invert(<cond>)
3120 where <cond> is not AL or NV. */
3121
3122 static int
3123 convert_csinc_to_cset (aarch64_inst *inst)
3124 {
3125 if (inst->operands[1].reg.regno == 0x1f
3126 && inst->operands[2].reg.regno == 0x1f
3127 && (inst->operands[3].cond->value & 0xe) != 0xe)
3128 {
3129 copy_operand_info (inst, 1, 3);
3130 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
3131 inst->operands[3].type = AARCH64_OPND_NIL;
3132 inst->operands[2].type = AARCH64_OPND_NIL;
3133 return 1;
3134 }
3135 return 0;
3136 }
3137
3138 /* MOV <Wd>, #<imm>
3139 is equivalent to:
3140 MOVZ <Wd>, #<imm16_5>, LSL #<shift>.
3141
3142 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
3143 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
3144 or where a MOVN has an immediate that could be encoded by MOVZ, or where
3145 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
3146 machine-instruction mnemonic must be used. */
3147
3148 static int
3149 convert_movewide_to_mov (aarch64_inst *inst)
3150 {
3151 uint64_t value = inst->operands[1].imm.value;
3152 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
3153 if (value == 0 && inst->operands[1].shifter.amount != 0)
3154 return 0;
3155 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
3156 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
3157 value <<= inst->operands[1].shifter.amount;
3158 /* As an alias convertor, it has to be clear that the INST->OPCODE
3159 is the opcode of the real instruction. */
3160 if (inst->opcode->op == OP_MOVN)
3161 {
3162 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
3163 value = ~value;
3164 /* A MOVN has an immediate that could be encoded by MOVZ. */
3165 if (aarch64_wide_constant_p (value, is32, NULL))
3166 return 0;
3167 }
3168 inst->operands[1].imm.value = value;
3169 inst->operands[1].shifter.amount = 0;
3170 return 1;
3171 }
3172
3173 /* MOV <Wd>, #<imm>
3174 is equivalent to:
3175 ORR <Wd>, WZR, #<imm>.
3176
3177 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
3178 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
3179 or where a MOVN has an immediate that could be encoded by MOVZ, or where
3180 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
3181 machine-instruction mnemonic must be used. */
3182
3183 static int
3184 convert_movebitmask_to_mov (aarch64_inst *inst)
3185 {
3186 int is32;
3187 uint64_t value;
3188
3189 /* Should have been assured by the base opcode value. */
3190 assert (inst->operands[1].reg.regno == 0x1f);
3191 copy_operand_info (inst, 1, 2);
3192 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
3193 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
3194 value = inst->operands[1].imm.value;
3195 /* ORR has an immediate that could be generated by a MOVZ or MOVN
3196 instruction. */
3197 if (inst->operands[0].reg.regno != 0x1f
3198 && (aarch64_wide_constant_p (value, is32, NULL)
3199 || aarch64_wide_constant_p (~value, is32, NULL)))
3200 return 0;
3201
3202 inst->operands[2].type = AARCH64_OPND_NIL;
3203 return 1;
3204 }
3205
3206 /* Some alias opcodes are disassembled by being converted from their real-form.
3207 N.B. INST->OPCODE is the real opcode rather than the alias. */
3208
3209 static int
3210 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
3211 {
3212 switch (alias->op)
3213 {
3214 case OP_ASR_IMM:
3215 case OP_LSR_IMM:
3216 return convert_bfm_to_sr (inst);
3217 case OP_LSL_IMM:
3218 return convert_ubfm_to_lsl (inst);
3219 case OP_CINC:
3220 case OP_CINV:
3221 case OP_CNEG:
3222 return convert_from_csel (inst);
3223 case OP_CSET:
3224 case OP_CSETM:
3225 return convert_csinc_to_cset (inst);
3226 case OP_UBFX:
3227 case OP_BFXIL:
3228 case OP_SBFX:
3229 return convert_bfm_to_bfx (inst);
3230 case OP_SBFIZ:
3231 case OP_BFI:
3232 case OP_UBFIZ:
3233 return convert_bfm_to_bfi (inst);
3234 case OP_BFC:
3235 return convert_bfm_to_bfc (inst);
3236 case OP_MOV_V:
3237 return convert_orr_to_mov (inst);
3238 case OP_MOV_IMM_WIDE:
3239 case OP_MOV_IMM_WIDEN:
3240 return convert_movewide_to_mov (inst);
3241 case OP_MOV_IMM_LOG:
3242 return convert_movebitmask_to_mov (inst);
3243 case OP_ROR_IMM:
3244 return convert_extr_to_ror (inst);
3245 case OP_SXTL:
3246 case OP_SXTL2:
3247 case OP_UXTL:
3248 case OP_UXTL2:
3249 return convert_shll_to_xtl (inst);
3250 default:
3251 return 0;
3252 }
3253 }
3254
3255 static bool
3256 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
3257 aarch64_inst *, int, aarch64_operand_error *errors);
3258
3259 /* Given the instruction information in *INST, check if the instruction has
3260 any alias form that can be used to represent *INST. If the answer is yes,
3261 update *INST to be in the form of the determined alias. */
3262
3263 /* In the opcode description table, the following flags are used in opcode
3264 entries to help establish the relations between the real and alias opcodes:
3265
3266 F_ALIAS: opcode is an alias
3267 F_HAS_ALIAS: opcode has alias(es)
3268 F_P1
3269 F_P2
3270 F_P3: Disassembly preference priority 1-3 (the larger the
3271 higher). If nothing is specified, it is the priority
3272 0 by default, i.e. the lowest priority.
3273
3274 Although the relation between the machine and the alias instructions are not
3275 explicitly described, it can be easily determined from the base opcode
3276 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
3277 description entries:
3278
3279 The mask of an alias opcode must be equal to or a super-set (i.e. more
3280 constrained) of that of the aliased opcode; so is the base opcode value.
3281
3282 if (opcode_has_alias (real) && alias_opcode_p (opcode)
3283 && (opcode->mask & real->mask) == real->mask
3284 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
3285 then OPCODE is an alias of, and only of, the REAL instruction
3286
3287 The alias relationship is forced flat-structured to keep related algorithm
3288 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
3289
   During disassembly, the decoding decision tree (in
   opcodes/aarch64-dis-2.c) always returns a machine instruction opcode
   entry; if the decoding of such a machine instruction succeeds (and
   -Mno-aliases is not specified), the disassembler will check whether any
   alias instruction exists for this real instruction.  If there is, the
   disassembler will try to disassemble the 32-bit binary again using the
   alias's rule, or try to convert the IR to the form of the alias.  In the
   case of multiple aliases, the aliases are tried one by one from the
   highest priority (currently the flag F_P3) to the lowest priority (no
   priority flag), and the first that succeeds is adopted.
3300
   You may ask why there is a need for the conversion of IR from one form to
   another in handling certain aliases.  This is because on the one hand it
   avoids adding more operand code to handle unusual encoding/decoding; on
   the other hand, during disassembly, the conversion is an effective
   approach to check the condition of an alias (as an alias may be adopted
   only if certain conditions are met).
3307
3308 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
3309 aarch64_opcode_table and generated aarch64_find_alias_opcode and
3310 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
3311
3312 static void
3313 determine_disassembling_preference (struct aarch64_inst *inst,
3314 aarch64_operand_error *errors)
3315 {
3316 const aarch64_opcode *opcode;
3317 const aarch64_opcode *alias;
3318
3319 opcode = inst->opcode;
3320
3321 /* This opcode does not have an alias, so use itself. */
3322 if (!opcode_has_alias (opcode))
3323 return;
3324
3325 alias = aarch64_find_alias_opcode (opcode);
3326 assert (alias);
3327
3328 #ifdef DEBUG_AARCH64
3329 if (debug_dump)
3330 {
3331 const aarch64_opcode *tmp = alias;
3332 printf ("#### LIST orderd: ");
3333 while (tmp)
3334 {
3335 printf ("%s, ", tmp->name);
3336 tmp = aarch64_find_next_alias_opcode (tmp);
3337 }
3338 printf ("\n");
3339 }
3340 #endif /* DEBUG_AARCH64 */
3341
3342 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
3343 {
3344 DEBUG_TRACE ("try %s", alias->name);
3345 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
3346
3347 /* An alias can be a pseudo opcode which will never be used in the
3348 disassembly, e.g. BIC logical immediate is such a pseudo opcode
3349 aliasing AND. */
3350 if (pseudo_opcode_p (alias))
3351 {
3352 DEBUG_TRACE ("skip pseudo %s", alias->name);
3353 continue;
3354 }
3355
3356 if ((inst->value & alias->mask) != alias->opcode)
3357 {
3358 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
3359 continue;
3360 }
3361
3362 if (!AARCH64_CPU_HAS_ALL_FEATURES (arch_variant, *alias->avariant))
3363 {
3364 DEBUG_TRACE ("skip %s: we're missing features", alias->name);
3365 continue;
3366 }
3367
3368 /* No need to do any complicated transformation on operands, if the alias
3369 opcode does not have any operand. */
3370 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
3371 {
3372 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
3373 aarch64_replace_opcode (inst, alias);
3374 return;
3375 }
3376 if (alias->flags & F_CONV)
3377 {
3378 aarch64_inst copy;
3379 memcpy (&copy, inst, sizeof (aarch64_inst));
3380 /* ALIAS is the preference as long as the instruction can be
3381 successfully converted to the form of ALIAS. */
3382 if (convert_to_alias (&copy, alias) == 1)
3383 {
3384 aarch64_replace_opcode (&copy, alias);
3385 if (aarch64_match_operands_constraint (&copy, NULL) != 1)
3386 {
3387 DEBUG_TRACE ("FAILED with alias %s ", alias->name);
3388 }
3389 else
3390 {
3391 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
3392 memcpy (inst, &copy, sizeof (aarch64_inst));
3393 }
3394 return;
3395 }
3396 }
3397 else
3398 {
3399 /* Directly decode the alias opcode. */
3400 aarch64_inst temp;
3401 memset (&temp, '\0', sizeof (aarch64_inst));
3402 if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
3403 {
3404 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
3405 memcpy (inst, &temp, sizeof (aarch64_inst));
3406 return;
3407 }
3408 }
3409 }
3410 }
3411
/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, decode the appropriate fields
   and fill in the operand qualifiers accordingly.  Return true if no
   problems are found.  */

static bool
aarch64_decode_variant_using_iclass (aarch64_inst *inst)
{
  int i, variant;

  /* VARIANT selects a row of the opcode's qualifiers_list; each case below
     derives it from the iclass-specific instruction fields, returning false
     for unallocated encodings.  */
  variant = 0;
  switch (inst->opcode->iclass)
    {
    case sme_mov:
      variant = extract_fields (inst->value, 0, 2, FLD_SME_Q, FLD_SME_size_22);
      /* Values 4-6 are unallocated; 7 maps onto the fifth qualifier row.  */
      if (variant >= 4 && variant < 7)
	return false;
      if (variant == 7)
	variant = 4;
      break;

    case sme_psel:
      /* The variant is the position of the lowest set bit in tszh:tszl;
	 an all-zero field is unallocated.  */
      i = extract_fields (inst->value, 0, 2, FLD_SME_tszh, FLD_SME_tszl);
      if (i == 0)
	return false;
      while ((i & 1) == 0)
	{
	  i >>= 1;
	  variant += 1;
	}
      break;

    case sme_shift:
      i = extract_field (FLD_SVE_tszh, inst->value, 0);
      goto sve_shift;

    case sme_size_12_bh:
      variant = extract_field (FLD_S, inst->value, 0);
      if (variant > 1)
	return false;
      break;

    case sme_size_12_bhs:
      variant = extract_field (FLD_SME_size_12, inst->value, 0);
      if (variant >= 3)
	return false;
      break;

    case sme_size_12_hs:
      variant = extract_field (FLD_SME_size_12, inst->value, 0);
      if (variant != 1 && variant != 2)
	return false;
      variant -= 1;
      break;

    case sme_size_12_b:
      variant = extract_field (FLD_SME_size_12, inst->value, 0);
      if (variant != 0)
	return false;
      break;

    case sme_size_22:
      variant = extract_field (FLD_SME_size_22, inst->value, 0);
      break;

    case sme_size_22_hsd:
      variant = extract_field (FLD_SME_size_22, inst->value, 0);
      if (variant < 1)
	return false;
      variant -= 1;
      break;

    case sme_sz_23:
      variant = extract_field (FLD_SME_sz_23, inst->value, 0);
      break;

    case sve_cpy:
      variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
      break;

    case sve_index:
      /* The variant is the position of the lowest set bit in imm5;
	 an all-zero imm5 is unallocated.  */
      i = extract_field (FLD_imm5, inst->value, 0);

      if ((i & 31) == 0)
	return false;
      while ((i & 1) == 0)
	{
	  i >>= 1;
	  variant += 1;
	}
      break;

    case sve_limm:
      /* Pick the smallest applicable element size.  */
      if ((inst->value & 0x20600) == 0x600)
	variant = 0;
      else if ((inst->value & 0x20400) == 0x400)
	variant = 1;
      else if ((inst->value & 0x20000) == 0)
	variant = 2;
      else
	variant = 3;
      break;

    case sme2_mov:
      /* .D is preferred over the other sizes in disassembly.  */
      variant = 3;
      break;

    case sme2_movaz:
    case sme_misc:
    case sve_misc:
      /* These instructions have only a single variant.  */
      break;

    case sve_movprfx:
      variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
      break;

    case sve_pred_zm:
      variant = extract_field (FLD_SVE_M_4, inst->value, 0);
      break;

    case sve_shift_pred:
      i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
    sve_shift:
      /* Shared tail: the variant is the position of the highest set bit
	 in I; zero is unallocated.  */
      if (i == 0)
	return false;
      while (i != 1)
	{
	  i >>= 1;
	  variant += 1;
	}
      break;

    case sve_shift_unpred:
      i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
      goto sve_shift;

    case sve_size_bhs:
      variant = extract_field (FLD_size, inst->value, 0);
      if (variant >= 3)
	return false;
      break;

    case sve_size_bhsd:
      variant = extract_field (FLD_size, inst->value, 0);
      break;

    case sve_size_hsd:
      i = extract_field (FLD_size, inst->value, 0);
      if (i < 1)
	return false;
      variant = i - 1;
      break;

    case sme_fp_sd:
    case sme_int_sd:
    case sve_size_bh:
    case sve_size_sd:
      variant = extract_field (FLD_SVE_sz, inst->value, 0);
      break;

    case sve_size_sd2:
      variant = extract_field (FLD_SVE_sz2, inst->value, 0);
      break;

    case sve_size_hsd2:
      i = extract_field (FLD_SVE_size, inst->value, 0);
      if (i < 1)
	return false;
      variant = i - 1;
      break;

    case sve_size_13:
      /* Ignore low bit of this field since that is set in the opcode for
	 instructions of this iclass.  */
      i = (extract_field (FLD_size, inst->value, 0) & 2);
      variant = (i >> 1);
      break;

    case sve_shift_tsz_bhsd:
      /* The variant is the position of the highest set bit in tszh:tszl;
	 zero is unallocated.  */
      i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
      if (i == 0)
	return false;
      while (i != 1)
	{
	  i >>= 1;
	  variant += 1;
	}
      break;

    case sve_size_tsz_bhs:
      /* As above, but the field must contain exactly one set bit; any other
	 value is unallocated.  */
      i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
      if (i == 0)
	return false;
      while (i != 1)
	{
	  if (i & 1)
	    return false;
	  i >>= 1;
	  variant += 1;
	}
      break;

    case sve_shift_tsz_hsd:
      /* The variant is the position of the highest set bit in sz:tszl;
	 zero is unallocated.  */
      i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
      if (i == 0)
	return false;
      while (i != 1)
	{
	  i >>= 1;
	  variant += 1;
	}
      break;

    default:
      /* No mapping between instruction class and qualifiers.  */
      return true;
    }

  /* Copy the qualifiers of the selected variant onto the operands.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
  return true;
}
/* Decode the CODE according to OPCODE; fill INST.  Return false if the
   decoding fails, which means that CODE is not an instruction of OPCODE;
   otherwise return true.

   If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
   determined and used to disassemble CODE; this is done just before the
   return.  */

static bool
aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
		       aarch64_inst *inst, int noaliases_p,
		       aarch64_operand_error *errors)
{
  int i;

  DEBUG_TRACE ("enter with %s", opcode->name);

  assert (opcode && inst);

  /* Clear inst.  */
  memset (inst, '\0', sizeof (aarch64_inst));

  /* Check the base opcode.  */
  if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
    {
      DEBUG_TRACE ("base opcode match FAIL");
      goto decode_fail;
    }

  inst->opcode = opcode;
  inst->value = code;

  /* Assign operand codes and indexes.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      if (opcode->operands[i] == AARCH64_OPND_NIL)
	break;
      inst->operands[i].type = opcode->operands[i];
      inst->operands[i].idx = i;
    }

  /* Call the opcode decoder indicated by flags.  */
  if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
    {
      DEBUG_TRACE ("opcode flag-based decoder FAIL");
      goto decode_fail;
    }

  /* Possibly use the instruction class to determine the correct
     qualifier.  */
  if (!aarch64_decode_variant_using_iclass (inst))
    {
      DEBUG_TRACE ("iclass-based decoder FAIL");
      goto decode_fail;
    }

  /* Call operand decoders.  Each operand type may register an extractor
     that pulls its encoding out of CODE.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type;

      type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      opnd = &aarch64_operands[type];
      if (operand_has_extractor (opnd)
	  && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
					 errors)))
	{
	  DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
	  goto decode_fail;
	}
    }

  /* If the opcode has a verifier, then check it now.  */
  if (opcode->verifier
      && opcode->verifier (inst, code, 0, false, errors, NULL) != ERR_OK)
    {
      DEBUG_TRACE ("operand verifier FAIL");
      goto decode_fail;
    }

  /* Match the qualifiers.  */
  if (aarch64_match_operands_constraint (inst, NULL) == 1)
    {
      /* Arriving here, the CODE has been determined as a valid instruction
	 of OPCODE and *INST has been filled with information of this OPCODE
	 instruction.  Before the return, check if the instruction has any
	 alias and should be disassembled in the form of its alias instead.
	 If the answer is yes, *INST will be updated.  */
      if (!noaliases_p)
	determine_disassembling_preference (inst, errors);
      DEBUG_TRACE ("SUCCESS");
      return true;
    }
  else
    {
      DEBUG_TRACE ("constraint matching FAIL");
    }

 decode_fail:
  return false;
}
3742 \f
3743 /* This does some user-friendly fix-up to *INST. It is currently focus on
3744 the adjustment of qualifiers to help the printed instruction
3745 recognized/understood more easily. */
3746
3747 static void
3748 user_friendly_fixup (aarch64_inst *inst)
3749 {
3750 switch (inst->opcode->iclass)
3751 {
3752 case testbranch:
3753 /* TBNZ Xn|Wn, #uimm6, label
3754 Test and Branch Not Zero: conditionally jumps to label if bit number
3755 uimm6 in register Xn is not zero. The bit number implies the width of
3756 the register, which may be written and should be disassembled as Wn if
3757 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
3758 */
3759 if (inst->operands[1].imm.value < 32)
3760 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
3761 break;
3762 default: break;
3763 }
3764 }
3765
/* Decode INSN and fill in *INST the instruction information.  An alias
   opcode may be filled in *INST if NOALIASES_P is FALSE.  Return ERR_OK
   on success, ERR_UND if no opcode matches.  */

enum err_type
aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
		     bool noaliases_p,
		     aarch64_operand_error *errors)
{
  const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);

#ifdef DEBUG_AARCH64
  if (debug_dump)
    {
      const aarch64_opcode *tmp = opcode;
      printf ("\n");
      DEBUG_TRACE ("opcode lookup:");
      while (tmp != NULL)
	{
	  aarch64_verbose (" %s", tmp->name);
	  tmp = aarch64_find_next_opcode (tmp);
	}
    }
#endif /* DEBUG_AARCH64 */

  /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
     distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
     opcode field and value, apart from the difference that one of them has an
     extra field as part of the opcode, but such a field is used for operand
     encoding in other opcode(s) ('immh' in the case of the example).  */
  while (opcode != NULL)
    {
      /* But only one opcode can be decoded successfully for, as the
	 decoding routine will check the constraint carefully.  */
      if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
	return ERR_OK;
      opcode = aarch64_find_next_opcode (opcode);
    }

  return ERR_UND;
}
3807
3808 /* Return a short string to indicate a switch to STYLE. These strings
3809 will be embedded into the disassembled operand text (as produced by
3810 aarch64_print_operand), and then spotted in the print_operands function
3811 so that the disassembler output can be split by style. */
3812
3813 static const char *
3814 get_style_text (enum disassembler_style style)
3815 {
3816 static bool init = false;
3817 static char formats[16][4];
3818 unsigned num;
3819
3820 /* First time through we build a string for every possible format. This
3821 code relies on there being no more than 16 different styles (there's
3822 an assert below for this). */
3823 if (!init)
3824 {
3825 int i;
3826
3827 for (i = 0; i <= 0xf; ++i)
3828 {
3829 int res ATTRIBUTE_UNUSED
3830 = snprintf (&formats[i][0], sizeof (formats[i]), "%c%x%c",
3831 STYLE_MARKER_CHAR, i, STYLE_MARKER_CHAR);
3832 assert (res == 3);
3833 }
3834
3835 init = true;
3836 }
3837
3838 /* Return the string that marks switching to STYLE. */
3839 num = (unsigned) style;
3840 assert (style <= 0xf);
3841 return formats[num];
3842 }
3843
3844 /* Callback used by aarch64_print_operand to apply STYLE to the
3845 disassembler output created from FMT and ARGS. The STYLER object holds
3846 any required state. Must return a pointer to a string (created from FMT
3847 and ARGS) that will continue to be valid until the complete disassembled
3848 instruction has been printed.
3849
3850 We return a string that includes two embedded style markers, the first,
3851 places at the start of the string, indicates a switch to STYLE, and the
3852 second, placed at the end of the string, indicates a switch back to the
3853 default text style.
3854
3855 Later, when we print the operand text we take care to collapse any
3856 adjacent style markers, and to ignore any style markers that appear at
3857 the very end of a complete operand string. */
3858
3859 static const char *aarch64_apply_style (struct aarch64_styler *styler,
3860 enum disassembler_style style,
3861 const char *fmt,
3862 va_list args)
3863 {
3864 int res;
3865 char *ptr, *tmp;
3866 struct obstack *stack = (struct obstack *) styler->state;
3867 va_list ap;
3868
3869 /* These are the two strings for switching styles. */
3870 const char *style_on = get_style_text (style);
3871 const char *style_off = get_style_text (dis_style_text);
3872
3873 /* Calculate space needed once FMT and ARGS are expanded. */
3874 va_copy (ap, args);
3875 res = vsnprintf (NULL, 0, fmt, ap);
3876 va_end (ap);
3877 assert (res >= 0);
3878
3879 /* Allocate space on the obstack for the expanded FMT and ARGS, as well
3880 as the two strings for switching styles, then write all of these
3881 strings onto the obstack. */
3882 ptr = (char *) obstack_alloc (stack, res + strlen (style_on)
3883 + strlen (style_off) + 1);
3884 tmp = stpcpy (ptr, style_on);
3885 res = vsnprintf (tmp, (res + 1), fmt, args);
3886 assert (res >= 0);
3887 tmp += res;
3888 strcpy (tmp, style_off);
3889
3890 return ptr;
3891 }
3892
/* Print the operands of OPCODE, decoded into OPNDS, for the instruction
   at address PC, using the styled print callbacks in INFO.  Sets
   *HAS_NOTES to true if a disassembly note was appended to the output
   (only when the "notes" option is enabled).  */

static void
print_operands (bfd_vma pc, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds, struct disassemble_info *info,
		bool *has_notes)
{
  char *notes = NULL;
  int i, pcrel_p, num_printed;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* aarch64_print_operand builds its styled strings on this obstack via
     aarch64_apply_style; everything is freed in one go at the end.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
			     &info->target, &notes, cmt, sizeof (cmt),
			     arch_variant, &styler);

      /* Print the delimiter (taking account of omitted operand(s)).  */
      if (str[0] != '\0')
	(*info->fprintf_styled_func) (info->stream, dis_style_text, "%s",
				      num_printed++ == 0 ? "\t" : ", ");

      /* Print the operand.  */
      if (pcrel_p)
	(*info->print_address_func) (info->target, info);
      else
	{
	  /* This operand came from aarch64_print_operand, and will include
	     embedded strings indicating which style each character should
	     have.  In the following code we split the text based on
	     CURR_STYLE, and call the styled print callback to print each
	     block of text in the appropriate style.  */
	  char *start, *curr;
	  enum disassembler_style curr_style = dis_style_text;

	  start = curr = str;
	  do
	    {
	      /* A style marker is STYLE_MARKER_CHAR, one hex digit
		 encoding the style, then STYLE_MARKER_CHAR again.  */
	      if (*curr == '\0'
		  || (*curr == STYLE_MARKER_CHAR
		      && ISXDIGIT (*(curr + 1))
		      && *(curr + 2) == STYLE_MARKER_CHAR))
		{
		  /* Output content between our START position and CURR.  */
		  int len = curr - start;
		  if (len > 0)
		    {
		      if ((*info->fprintf_styled_func) (info->stream,
							curr_style,
							"%.*s",
							len, start) < 0)
			break;
		    }

		  if (*curr == '\0')
		    break;

		  /* Skip over the initial STYLE_MARKER_CHAR.  */
		  ++curr;

		  /* Update the CURR_STYLE.  As there are less than 16
		     styles, it is possible, that if the input is corrupted
		     in some way, that we might set CURR_STYLE to an
		     invalid value.  Don't worry though, we check for this
		     situation.  */
		  if (*curr >= '0' && *curr <= '9')
		    curr_style = (enum disassembler_style) (*curr - '0');
		  else if (*curr >= 'a' && *curr <= 'f')
		    curr_style = (enum disassembler_style) (*curr - 'a' + 10);
		  else
		    curr_style = dis_style_text;

		  /* Check for an invalid style having been selected.  This
		     should never happen, but it doesn't hurt to be a
		     little paranoid.  */
		  if (curr_style > dis_style_comment_start)
		    curr_style = dis_style_text;

		  /* Skip the hex character, and the closing STYLE_MARKER_CHAR.  */
		  curr += 2;

		  /* Reset the START to after the style marker.  */
		  start = curr;
		}
	      else
		++curr;
	    }
	  while (true);
	}

      /* Print the comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	(*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
				      "\t// %s", cmt);
    }

  if (notes && !no_notes)
    {
      *has_notes = true;
      (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
				    " // note: %s", notes);
    }

  obstack_free (&content, NULL);
}
4018
4019 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
4020
4021 static void
4022 remove_dot_suffix (char *name, const aarch64_inst *inst)
4023 {
4024 char *ptr;
4025 size_t len;
4026
4027 ptr = strchr (inst->opcode->name, '.');
4028 assert (ptr && inst->cond);
4029 len = ptr - inst->opcode->name;
4030 assert (len < 8);
4031 strncpy (name, inst->opcode->name, len);
4032 name[len] = '\0';
4033 }
4034
4035 /* Print the instruction mnemonic name. */
4036
4037 static void
4038 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
4039 {
4040 if (inst->opcode->flags & F_COND)
4041 {
4042 /* For instructions that are truly conditionally executed, e.g. b.cond,
4043 prepare the full mnemonic name with the corresponding condition
4044 suffix. */
4045 char name[8];
4046
4047 remove_dot_suffix (name, inst);
4048 (*info->fprintf_styled_func) (info->stream, dis_style_mnemonic,
4049 "%s.%s", name, inst->cond->names[0]);
4050 }
4051 else
4052 (*info->fprintf_styled_func) (info->stream, dis_style_mnemonic,
4053 "%s", inst->opcode->name);
4054 }
4055
4056 /* Decide whether we need to print a comment after the operands of
4057 instruction INST. */
4058
4059 static void
4060 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
4061 {
4062 if (inst->opcode->flags & F_COND)
4063 {
4064 char name[8];
4065 unsigned int i, num_conds;
4066
4067 remove_dot_suffix (name, inst);
4068 num_conds = ARRAY_SIZE (inst->cond->names);
4069 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
4070 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
4071 "%s %s.%s",
4072 i == 1 ? " //" : ",",
4073 name, inst->cond->names[i]);
4074 }
4075 }
4076
4077 /* Build notes from verifiers into a string for printing. */
4078
4079 static void
4080 print_verifier_notes (aarch64_operand_error *detail,
4081 struct disassemble_info *info)
4082 {
4083 if (no_notes)
4084 return;
4085
4086 /* The output of the verifier cannot be a fatal error, otherwise the assembly
4087 would not have succeeded. We can safely ignore these. */
4088 assert (detail->non_fatal);
4089
4090 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
4091 " // note: ");
4092 switch (detail->kind)
4093 {
4094 case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
4095 (*info->fprintf_styled_func) (info->stream, dis_style_text,
4096 _("this `%s' should have an immediately"
4097 " preceding `%s'"),
4098 detail->data[0].s, detail->data[1].s);
4099 break;
4100
4101 case AARCH64_OPDE_EXPECTED_A_AFTER_B:
4102 (*info->fprintf_styled_func) (info->stream, dis_style_text,
4103 _("expected `%s' after previous `%s'"),
4104 detail->data[0].s, detail->data[1].s);
4105 break;
4106
4107 default:
4108 assert (detail->error);
4109 (*info->fprintf_styled_func) (info->stream, dis_style_text,
4110 "%s", detail->error);
4111 if (detail->index >= 0)
4112 (*info->fprintf_styled_func) (info->stream, dis_style_text,
4113 " at operand %d", detail->index + 1);
4114 break;
4115 }
4116 }
4117
4118 /* Print the instruction according to *INST. */
4119
4120 static void
4121 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
4122 const aarch64_insn code,
4123 struct disassemble_info *info,
4124 aarch64_operand_error *mismatch_details)
4125 {
4126 bool has_notes = false;
4127
4128 print_mnemonic_name (inst, info);
4129 print_operands (pc, inst->opcode, inst->operands, info, &has_notes);
4130 print_comment (inst, info);
4131
4132 /* We've already printed a note, not enough space to print more so exit.
4133 Usually notes shouldn't overlap so it shouldn't happen that we have a note
4134 from a register and instruction at the same time. */
4135 if (has_notes)
4136 return;
4137
4138 /* Always run constraint verifiers, this is needed because constraints need to
4139 maintain a global state regardless of whether the instruction has the flag
4140 set or not. */
4141 enum err_type result = verify_constraints (inst, code, pc, false,
4142 mismatch_details, &insn_sequence);
4143 switch (result)
4144 {
4145 case ERR_VFI:
4146 print_verifier_notes (mismatch_details, info);
4147 break;
4148 case ERR_UND:
4149 case ERR_UNP:
4150 case ERR_NYI:
4151 default:
4152 break;
4153 }
4154 }
4155
4156 /* Entry-point of the instruction disassembler and printer. */
4157
4158 static void
4159 print_insn_aarch64_word (bfd_vma pc,
4160 uint32_t word,
4161 struct disassemble_info *info,
4162 aarch64_operand_error *errors)
4163 {
4164 static const char *err_msg[ERR_NR_ENTRIES+1] =
4165 {
4166 [ERR_OK] = "_",
4167 [ERR_UND] = "undefined",
4168 [ERR_UNP] = "unpredictable",
4169 [ERR_NYI] = "NYI"
4170 };
4171
4172 enum err_type ret;
4173 aarch64_inst inst;
4174
4175 info->insn_info_valid = 1;
4176 info->branch_delay_insns = 0;
4177 info->data_size = 0;
4178 info->target = 0;
4179 info->target2 = 0;
4180
4181 if (info->flags & INSN_HAS_RELOC)
4182 /* If the instruction has a reloc associated with it, then
4183 the offset field in the instruction will actually be the
4184 addend for the reloc. (If we are using REL type relocs).
4185 In such cases, we can ignore the pc when computing
4186 addresses, since the addend is not currently pc-relative. */
4187 pc = 0;
4188
4189 ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
4190
4191 if (((word >> 21) & 0x3ff) == 1)
4192 {
4193 /* RESERVED for ALES. */
4194 assert (ret != ERR_OK);
4195 ret = ERR_NYI;
4196 }
4197
4198 switch (ret)
4199 {
4200 case ERR_UND:
4201 case ERR_UNP:
4202 case ERR_NYI:
4203 /* Handle undefined instructions. */
4204 info->insn_type = dis_noninsn;
4205 (*info->fprintf_styled_func) (info->stream,
4206 dis_style_assembler_directive,
4207 ".inst\t");
4208 (*info->fprintf_styled_func) (info->stream, dis_style_immediate,
4209 "0x%08x", word);
4210 (*info->fprintf_styled_func) (info->stream, dis_style_comment_start,
4211 " ; %s", err_msg[ret]);
4212 break;
4213 case ERR_OK:
4214 user_friendly_fixup (&inst);
4215 if (inst.opcode->iclass == condbranch
4216 || inst.opcode->iclass == testbranch
4217 || inst.opcode->iclass == compbranch)
4218 info->insn_type = dis_condbranch;
4219 else if (inst.opcode->iclass == branch_imm)
4220 info->insn_type = dis_jsr;
4221 print_aarch64_insn (pc, &inst, word, info, errors);
4222 break;
4223 default:
4224 abort ();
4225 }
4226 }
4227
4228 /* Disallow mapping symbols ($x, $d etc) from
4229 being displayed in symbol relative addresses. */
4230
4231 bool
4232 aarch64_symbol_is_valid (asymbol * sym,
4233 struct disassemble_info * info ATTRIBUTE_UNUSED)
4234 {
4235 const char * name;
4236
4237 if (sym == NULL)
4238 return false;
4239
4240 name = bfd_asymbol_name (sym);
4241
4242 return name
4243 && (name[0] != '$'
4244 || (name[1] != 'x' && name[1] != 'd')
4245 || (name[2] != '\0' && name[2] != '.'));
4246 }
4247
4248 /* Print data bytes on INFO->STREAM. */
4249
4250 static void
4251 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
4252 uint32_t word,
4253 struct disassemble_info *info,
4254 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
4255 {
4256 switch (info->bytes_per_chunk)
4257 {
4258 case 1:
4259 info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
4260 ".byte\t");
4261 info->fprintf_styled_func (info->stream, dis_style_immediate,
4262 "0x%02x", word);
4263 break;
4264 case 2:
4265 info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
4266 ".short\t");
4267 info->fprintf_styled_func (info->stream, dis_style_immediate,
4268 "0x%04x", word);
4269 break;
4270 case 4:
4271 info->fprintf_styled_func (info->stream, dis_style_assembler_directive,
4272 ".word\t");
4273 info->fprintf_styled_func (info->stream, dis_style_immediate,
4274 "0x%08x", word);
4275 break;
4276 default:
4277 abort ();
4278 }
4279 }
4280
4281 /* Try to infer the code or data type from a symbol.
4282 Returns nonzero if *MAP_TYPE was set. */
4283
4284 static int
4285 get_sym_code_type (struct disassemble_info *info, int n,
4286 enum map_type *map_type)
4287 {
4288 asymbol * as;
4289 elf_symbol_type *es;
4290 unsigned int type;
4291 const char *name;
4292
4293 /* If the symbol is in a different section, ignore it. */
4294 if (info->section != NULL && info->section != info->symtab[n]->section)
4295 return false;
4296
4297 if (n >= info->symtab_size)
4298 return false;
4299
4300 as = info->symtab[n];
4301 if (bfd_asymbol_flavour (as) != bfd_target_elf_flavour)
4302 return false;
4303 es = (elf_symbol_type *) as;
4304
4305 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
4306
4307 /* If the symbol has function type then use that. */
4308 if (type == STT_FUNC)
4309 {
4310 *map_type = MAP_INSN;
4311 return true;
4312 }
4313
4314 /* Check for mapping symbols. */
4315 name = bfd_asymbol_name(info->symtab[n]);
4316 if (name[0] == '$'
4317 && (name[1] == 'x' || name[1] == 'd')
4318 && (name[2] == '\0' || name[2] == '.'))
4319 {
4320 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
4321 return true;
4322 }
4323
4324 return false;
4325 }
4326
4327 /* Set the feature bits in arch_variant in order to get the correct disassembly
4328 for the chosen architecture variant.
4329
4330 Currently we only restrict disassembly for Armv8-R and otherwise enable all
4331 non-R-profile features. */
4332 static void
4333 select_aarch64_variant (unsigned mach)
4334 {
4335 switch (mach)
4336 {
4337 case bfd_mach_aarch64_8R:
4338 AARCH64_SET_FEATURE (arch_variant, AARCH64_ARCH_V8R);
4339 break;
4340 default:
4341 arch_variant = (aarch64_feature_set) AARCH64_ALL_FEATURES;
4342 AARCH64_CLEAR_FEATURE (arch_variant, arch_variant, V8R);
4343 }
4344 }
4345
/* Entry-point of the AArch64 disassembler.  Disassemble the bytes at PC
   using the callbacks and options held in INFO, printing either an
   instruction or (for data-mapped regions) data directives.  Returns the
   number of bytes consumed, or -1 if the memory at PC could not be
   read.  */

int
print_insn_aarch64 (bfd_vma pc,
		    struct disassemble_info *info)
{
  bfd_byte	buffer[INSNLEN];
  int		status;
  void		(*printer) (bfd_vma, uint32_t, struct disassemble_info *,
			    aarch64_operand_error *);
  bool   found = false;
  unsigned int	size = 4;
  unsigned long	data;
  aarch64_operand_error errors;
  /* Architecture-variant selection only needs to happen once.  */
  static bool set_features;

  if (info->disassembler_options)
    {
      set_default_aarch64_dis_options (info);

      parse_aarch64_dis_options (info->disassembler_options);

      /* To avoid repeated parsing of these options, we remove them here.  */
      info->disassembler_options = NULL;
    }

  if (!set_features)
    {
      select_aarch64_variant (info->mach);
      set_features = true;
    }

  /* Aarch64 instructions are always little-endian */
  info->endian_code = BFD_ENDIAN_LITTLE;

  /* Default to DATA.  A text section is required by the ABI to contain an
     INSN mapping symbol at the start.  A data section has no such
     requirement, hence if no mapping symbol is found the section must
     contain only data.  This however isn't very useful if the user has
     fully stripped the binaries.  If this is the case use the section
     attributes to determine the default.  If we have no section default to
     INSN as well, as we may be disassembling some raw bytes on a baremetal
     HEX file or similar.  */
  enum map_type type = MAP_DATA;
  if ((info->section && info->section->flags & SEC_CODE) || !info->section)
    type = MAP_INSN;

  /* First check the full symtab for a mapping symbol, even if there
     are no usable non-mapping symbols for this address.  */
  if (info->symtab_size != 0
      && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
    {
      int last_sym = -1;
      bfd_vma addr, section_vma = 0;
      bool can_use_search_opt_p;
      int n;

      /* Going backwards invalidates the cached scan position.  */
      if (pc <= last_mapping_addr)
	last_mapping_sym = -1;

      /* Start scanning at the start of the function, or wherever
	 we finished last time.  */
      n = info->symtab_pos + 1;

      /* If the last stop offset is different from the current one it means we
	 are disassembling a different glob of bytes.  As such the optimization
	 would not be safe and we should start over.  */
      can_use_search_opt_p = last_mapping_sym >= 0
	&& info->stop_offset == last_stop_offset;

      if (n >= last_mapping_sym && can_use_search_opt_p)
	n = last_mapping_sym;

      /* Look down while we haven't passed the location being disassembled.
	 The reason for this is that there's no defined order between a symbol
	 and a mapping symbol that may be at the same address.  We may have to
	 look at least one position ahead.  */
      for (; n < info->symtab_size; n++)
	{
	  addr = bfd_asymbol_value (info->symtab[n]);
	  if (addr > pc)
	    break;
	  if (get_sym_code_type (info, n, &type))
	    {
	      last_sym = n;
	      found = true;
	    }
	}

      if (!found)
	{
	  n = info->symtab_pos;
	  if (n >= last_mapping_sym && can_use_search_opt_p)
	    n = last_mapping_sym;

	  /* No mapping symbol found at this address.  Look backwards
	     for a preceding one, but don't go past the section start
	     otherwise a data section with no mapping symbol can pick up
	     a text mapping symbol of a preceding section.  The documentation
	     says section can be NULL, in which case we will seek up all the
	     way to the top.  */
	  if (info->section)
	    section_vma = info->section->vma;

	  for (; n >= 0; n--)
	    {
	      addr = bfd_asymbol_value (info->symtab[n]);
	      if (addr < section_vma)
		break;

	      if (get_sym_code_type (info, n, &type))
		{
		  last_sym = n;
		  found = true;
		  break;
		}
	    }
	}

      /* Cache the scan results for the next call.  */
      last_mapping_sym = last_sym;
      last_type = type;
      last_stop_offset = info->stop_offset;

      /* Look a little bit ahead to see if we should print out
	 less than four bytes of data.  If there's a symbol,
	 mapping or otherwise, after two bytes then don't
	 print more.  */
      if (last_type == MAP_DATA)
	{
	  size = 4 - (pc & 3);
	  for (n = last_sym + 1; n < info->symtab_size; n++)
	    {
	      addr = bfd_asymbol_value (info->symtab[n]);
	      if (addr > pc)
		{
		  if (addr - pc < size)
		    size = addr - pc;
		  break;
		}
	    }
	  /* If the next symbol is after three bytes, we need to
	     print only part of the data, so that we can use either
	     .byte or .short.  */
	  if (size == 3)
	    size = (pc & 1) ? 1 : 2;
	}
    }
  else
    last_type = type;

  /* PR 10263: Disassemble data if requested to do so by the user.  */
  if (last_type == MAP_DATA && ((info->flags & DISASSEMBLE_DATA) == 0))
    {
      /* size was set above.  */
      info->bytes_per_chunk = size;
      info->display_endian = info->endian;
      printer = print_insn_data;
    }
  else
    {
      info->bytes_per_chunk = size = INSNLEN;
      info->display_endian = info->endian_code;
      printer = print_insn_aarch64_word;
    }

  status = (*info->read_memory_func) (pc, buffer, size, info);
  if (status != 0)
    {
      (*info->memory_error_func) (status, pc, info);
      return -1;
    }

  data = bfd_get_bits (buffer, size * 8,
		       info->display_endian == BFD_ENDIAN_BIG);

  (*printer) (pc, data, info, &errors);

  return size;
}
4525 \f
/* Describe the AArch64-specific -M disassembler options on STREAM, for
   use in objdump/gdb help output.  Each option is a separate translated
   string so each has its own msgid.  */

void
print_aarch64_disassembler_options (FILE *stream)
{
  fprintf (stream, _("\n\
The following AARCH64 specific disassembler options are supported for use\n\
with the -M switch (multiple options should be separated by commas):\n"));

  fprintf (stream, _("\n\
  no-aliases         Don't print instruction aliases.\n"));

  fprintf (stream, _("\n\
  aliases            Do print instruction aliases.\n"));

  fprintf (stream, _("\n\
  no-notes         Don't print instruction notes.\n"));

  fprintf (stream, _("\n\
  notes            Do print instruction notes.\n"));

#ifdef DEBUG_AARCH64
  fprintf (stream, _("\n\
  debug_dump         Temp switch for debug trace.\n"));
#endif /* DEBUG_AARCH64 */

  fprintf (stream, _("\n"));
}