]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - opcodes/aarch64-asm.c
Update year range in copyright notice of binutils files
[thirdparty/binutils-gdb.git] / opcodes / aarch64-asm.c
1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2021 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
25 #include "opintl.h"
26
27 /* Utilities. */
28
29 /* The unnamed arguments consist of the number of fields and information about
30 these fields where the VALUE will be inserted into CODE. MASK can be zero or
31 the base mask of the opcode.
32
33 N.B. the fields are required to be in such an order than the least signficant
34 field for VALUE comes the first, e.g. the <index> in
35 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37 the order of M, L, H. */
38
39 static inline void
40 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
41 {
42 uint32_t num;
43 const aarch64_field *field;
44 enum aarch64_field_kind kind;
45 va_list va;
46
47 va_start (va, mask);
48 num = va_arg (va, uint32_t);
49 assert (num <= 5);
50 while (num--)
51 {
52 kind = va_arg (va, enum aarch64_field_kind);
53 field = &fields[kind];
54 insert_field (kind, code, value, mask);
55 value >>= field->width;
56 }
57 va_end (va);
58 }
59
60 /* Insert a raw field value VALUE into all fields in SELF->fields.
61 The least significant bit goes in the final field. */
62
63 static void
64 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
65 aarch64_insn value)
66 {
67 unsigned int i;
68 enum aarch64_field_kind kind;
69
70 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
71 if (self->fields[i] != FLD_NIL)
72 {
73 kind = self->fields[i];
74 insert_field (kind, code, value, 0);
75 value >>= fields[kind].width;
76 }
77 }
78
79 /* Operand inserters. */
80
81 /* Insert nothing. */
82 bfd_boolean
83 aarch64_ins_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
84 const aarch64_opnd_info *info ATTRIBUTE_UNUSED,
85 aarch64_insn *code ATTRIBUTE_UNUSED,
86 const aarch64_inst *inst ATTRIBUTE_UNUSED,
87 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
88 {
89 return TRUE;
90 }
91
92 /* Insert register number. */
93 bfd_boolean
94 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
95 aarch64_insn *code,
96 const aarch64_inst *inst ATTRIBUTE_UNUSED,
97 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
98 {
99 insert_field (self->fields[0], code, info->reg.regno, 0);
100 return TRUE;
101 }
102
/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].
   The lane index is encoded differently per instruction class; each
   branch below handles one class.  */
bfd_boolean
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      /* POS is the bit position implied by the element size qualifier
	 (0 for B, 1 for H, ...).  */
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     The trailing 1-bit marks the element size; the index sits
	     above it:
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D  */
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      unsigned reglane_index = info->reglane.index;
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_4B:
	case AARCH64_OPND_QLF_S_2H:
	  /* Index encoded in L:H (passed least significant field first).  */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	default:
	  assert (0);
	}
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>].  */
      unsigned reglane_index = info->reglane.index;
      assert (reglane_index < 4);
      insert_field (FLD_SM3_imm2, code, reglane_index, 0);
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
	 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
	/* Complex operand takes two elements.  */
	reglane_index *= 2;

      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* Index encoded in H:L:M.  */
	  assert (reglane_index < 8);
	  insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* Index encoded in H:L.  */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* Index encoded in H alone.  */
	  assert (reglane_index < 2);
	  insert_field (FLD_H, code, reglane_index, 0);
	  break;
	default:
	  assert (0);
	}
    }
  return TRUE;
}
193
194 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
195 bfd_boolean
196 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
197 aarch64_insn *code,
198 const aarch64_inst *inst ATTRIBUTE_UNUSED,
199 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
200 {
201 /* R */
202 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
203 /* len */
204 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
205 return TRUE;
206 }
207
208 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
209 in AdvSIMD load/store instructions. */
210 bfd_boolean
211 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
212 const aarch64_opnd_info *info, aarch64_insn *code,
213 const aarch64_inst *inst,
214 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
215 {
216 aarch64_insn value = 0;
217 /* Number of elements in each structure to be loaded/stored. */
218 unsigned num = get_opcode_dependent_value (inst->opcode);
219
220 /* Rt */
221 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
222 /* opcode */
223 switch (num)
224 {
225 case 1:
226 switch (info->reglist.num_regs)
227 {
228 case 1: value = 0x7; break;
229 case 2: value = 0xa; break;
230 case 3: value = 0x6; break;
231 case 4: value = 0x2; break;
232 default: assert (0);
233 }
234 break;
235 case 2:
236 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
237 break;
238 case 3:
239 value = 0x4;
240 break;
241 case 4:
242 value = 0x0;
243 break;
244 default:
245 assert (0);
246 }
247 insert_field (FLD_opcode, code, value, 0);
248
249 return TRUE;
250 }
251
252 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
253 single structure to all lanes instructions. */
254 bfd_boolean
255 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
256 const aarch64_opnd_info *info, aarch64_insn *code,
257 const aarch64_inst *inst,
258 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
259 {
260 aarch64_insn value;
261 /* The opcode dependent area stores the number of elements in
262 each structure to be loaded/stored. */
263 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
264
265 /* Rt */
266 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
267 /* S */
268 value = (aarch64_insn) 0;
269 if (is_ld1r && info->reglist.num_regs == 2)
270 /* OP_LD1R does not have alternating variant, but have "two consecutive"
271 instead. */
272 value = (aarch64_insn) 1;
273 insert_field (FLD_S, code, value, 0);
274
275 return TRUE;
276 }
277
/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand e.g. Vt in AdvSIMD load/store single element instructions.
   The lane index is spread across the Q, S and size fields; which of
   those bits carry the index depends on the element size qualifier.  */
bfd_boolean
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q"; size<0> is fixed to 1 for D.  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      assert (0);
    }
  /* Fields passed least significant first: size, then S, then Q.  */
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  /* opcode<2:1> is a 2-bit sub-field of the asisdlso opcode field.  */
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return TRUE;
}
326
/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
     SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.
   The shift amount is folded into immh:immb together with the element
   size; right shifts and left shifts use different formulas (below).  */
bfd_boolean
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* The vector-form qualifier's standard value carries Q in its
	 low bit; strip it off before using VAL for the size below.
	 Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* Right shift: encoded as (element width in bits) - shift.
       immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* Left shift: encoded as (element width in bits) + shift.
       immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return TRUE;
}
382
383 /* Insert fields for e.g. the immediate operands in
384 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
385 bfd_boolean
386 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
387 aarch64_insn *code,
388 const aarch64_inst *inst ATTRIBUTE_UNUSED,
389 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
390 {
391 int64_t imm;
392
393 imm = info->imm.value;
394 if (operand_need_shift_by_two (self))
395 imm >>= 2;
396 if (operand_need_shift_by_four (self))
397 imm >>= 4;
398 insert_all_fields (self, code, imm);
399 return TRUE;
400 }
401
402 /* Insert immediate and its shift amount for e.g. the last operand in
403 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
404 bfd_boolean
405 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
406 aarch64_insn *code, const aarch64_inst *inst,
407 aarch64_operand_error *errors)
408 {
409 /* imm16 */
410 aarch64_ins_imm (self, info, code, inst, errors);
411 /* hw */
412 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
413 return TRUE;
414 }
415
416 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
417 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
418 bfd_boolean
419 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
420 const aarch64_opnd_info *info,
421 aarch64_insn *code,
422 const aarch64_inst *inst ATTRIBUTE_UNUSED,
423 aarch64_operand_error *errors
424 ATTRIBUTE_UNUSED)
425 {
426 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
427 uint64_t imm = info->imm.value;
428 enum aarch64_modifier_kind kind = info->shifter.kind;
429 int amount = info->shifter.amount;
430 aarch64_field field = {0, 0};
431
432 /* a:b:c:d:e:f:g:h */
433 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
434 {
435 /* Either MOVI <Dd>, #<imm>
436 or MOVI <Vd>.2D, #<imm>.
437 <imm> is a 64-bit immediate
438 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
439 encoded in "a:b:c:d:e:f:g:h". */
440 imm = aarch64_shrink_expanded_imm8 (imm);
441 assert ((int)imm >= 0);
442 }
443 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
444
445 if (kind == AARCH64_MOD_NONE)
446 return TRUE;
447
448 /* shift amount partially in cmode */
449 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
450 if (kind == AARCH64_MOD_LSL)
451 {
452 /* AARCH64_MOD_LSL: shift zeros. */
453 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
454 assert (esize == 4 || esize == 2 || esize == 1);
455 /* For 8-bit move immediate, the optional LSL #0 does not require
456 encoding. */
457 if (esize == 1)
458 return TRUE;
459 amount >>= 3;
460 if (esize == 4)
461 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
462 else
463 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
464 }
465 else
466 {
467 /* AARCH64_MOD_MSL: shift ones. */
468 amount >>= 4;
469 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
470 }
471 insert_field_2 (&field, code, amount, 0);
472
473 return TRUE;
474 }
475
476 /* Insert fields for an 8-bit floating-point immediate. */
477 bfd_boolean
478 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
479 aarch64_insn *code,
480 const aarch64_inst *inst ATTRIBUTE_UNUSED,
481 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
482 {
483 insert_all_fields (self, code, info->imm.value);
484 return TRUE;
485 }
486
487 /* Insert 1-bit rotation immediate (#90 or #270). */
488 bfd_boolean
489 aarch64_ins_imm_rotate1 (const aarch64_operand *self,
490 const aarch64_opnd_info *info,
491 aarch64_insn *code, const aarch64_inst *inst,
492 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
493 {
494 uint64_t rot = (info->imm.value - 90) / 180;
495 assert (rot < 2U);
496 insert_field (self->fields[0], code, rot, inst->opcode->mask);
497 return TRUE;
498 }
499
500 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
501 bfd_boolean
502 aarch64_ins_imm_rotate2 (const aarch64_operand *self,
503 const aarch64_opnd_info *info,
504 aarch64_insn *code, const aarch64_inst *inst,
505 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
506 {
507 uint64_t rot = info->imm.value / 90;
508 assert (rot < 4U);
509 insert_field (self->fields[0], code, rot, inst->opcode->mask);
510 return TRUE;
511 }
512
513 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
514 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
515 bfd_boolean
516 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
517 aarch64_insn *code,
518 const aarch64_inst *inst ATTRIBUTE_UNUSED,
519 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
520 {
521 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
522 return TRUE;
523 }
524
525 /* Insert arithmetic immediate for e.g. the last operand in
526 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
527 bfd_boolean
528 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
529 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
530 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
531 {
532 /* shift */
533 aarch64_insn value = info->shifter.amount ? 1 : 0;
534 insert_field (self->fields[0], code, value, 0);
535 /* imm12 (unsigned) */
536 insert_field (self->fields[1], code, info->imm.value, 0);
537 return TRUE;
538 }
539
540 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
541 the operand should be inverted before encoding. */
542 static bfd_boolean
543 aarch64_ins_limm_1 (const aarch64_operand *self,
544 const aarch64_opnd_info *info, aarch64_insn *code,
545 const aarch64_inst *inst, bfd_boolean invert_p,
546 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
547 {
548 aarch64_insn value;
549 uint64_t imm = info->imm.value;
550 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
551
552 if (invert_p)
553 imm = ~imm;
554 /* The constraint check should have guaranteed this wouldn't happen. */
555 assert (aarch64_logical_immediate_p (imm, esize, &value));
556
557 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
558 self->fields[0]);
559 return TRUE;
560 }
561
562 /* Insert logical/bitmask immediate for e.g. the last operand in
563 ORR <Wd|WSP>, <Wn>, #<imm>. */
564 bfd_boolean
565 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
566 aarch64_insn *code, const aarch64_inst *inst,
567 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
568 {
569 return aarch64_ins_limm_1 (self, info, code, inst,
570 inst->opcode->op == OP_BIC, errors);
571 }
572
573 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
574 bfd_boolean
575 aarch64_ins_inv_limm (const aarch64_operand *self,
576 const aarch64_opnd_info *info, aarch64_insn *code,
577 const aarch64_inst *inst,
578 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
579 {
580 return aarch64_ins_limm_1 (self, info, code, inst, TRUE, errors);
581 }
582
583 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
584 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
585 bfd_boolean
586 aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
587 aarch64_insn *code, const aarch64_inst *inst,
588 aarch64_operand_error *errors)
589 {
590 aarch64_insn value = 0;
591
592 assert (info->idx == 0);
593
594 /* Rt */
595 aarch64_ins_regno (self, info, code, inst, errors);
596 if (inst->opcode->iclass == ldstpair_indexed
597 || inst->opcode->iclass == ldstnapair_offs
598 || inst->opcode->iclass == ldstpair_off
599 || inst->opcode->iclass == loadlit)
600 {
601 /* size */
602 switch (info->qualifier)
603 {
604 case AARCH64_OPND_QLF_S_S: value = 0; break;
605 case AARCH64_OPND_QLF_S_D: value = 1; break;
606 case AARCH64_OPND_QLF_S_Q: value = 2; break;
607 default: assert (0);
608 }
609 insert_field (FLD_ldst_size, code, value, 0);
610 }
611 else
612 {
613 /* opc[1]:size */
614 value = aarch64_get_qualifier_standard_value (info->qualifier);
615 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
616 }
617
618 return TRUE;
619 }
620
621 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
622 bfd_boolean
623 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
624 const aarch64_opnd_info *info, aarch64_insn *code,
625 const aarch64_inst *inst ATTRIBUTE_UNUSED,
626 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
627 {
628 /* Rn */
629 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
630 return TRUE;
631 }
632
633 /* Encode the address operand for e.g.
634 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
635 bfd_boolean
636 aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
637 const aarch64_opnd_info *info, aarch64_insn *code,
638 const aarch64_inst *inst ATTRIBUTE_UNUSED,
639 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
640 {
641 aarch64_insn S;
642 enum aarch64_modifier_kind kind = info->shifter.kind;
643
644 /* Rn */
645 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
646 /* Rm */
647 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
648 /* option */
649 if (kind == AARCH64_MOD_LSL)
650 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven. */
651 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
652 /* S */
653 if (info->qualifier != AARCH64_OPND_QLF_S_B)
654 S = info->shifter.amount != 0;
655 else
656 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
657 S <amount>
658 0 [absent]
659 1 #0
660 Must be #0 if <extend> is explicitly LSL. */
661 S = info->shifter.operator_present && info->shifter.amount_present;
662 insert_field (FLD_S, code, S, 0);
663
664 return TRUE;
665 }
666
667 /* Encode the address operand for e.g.
668 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
669 bfd_boolean
670 aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
671 const aarch64_opnd_info *info, aarch64_insn *code,
672 const aarch64_inst *inst ATTRIBUTE_UNUSED,
673 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
674 {
675 /* Rn */
676 insert_field (self->fields[0], code, info->addr.base_regno, 0);
677
678 /* simm9 */
679 int imm = info->addr.offset.imm;
680 insert_field (self->fields[1], code, imm, 0);
681
682 /* writeback */
683 if (info->addr.writeback)
684 {
685 assert (info->addr.preind == 1 && info->addr.postind == 0);
686 insert_field (self->fields[2], code, 1, 0);
687 }
688 return TRUE;
689 }
690
691 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
692 bfd_boolean
693 aarch64_ins_addr_simm (const aarch64_operand *self,
694 const aarch64_opnd_info *info,
695 aarch64_insn *code,
696 const aarch64_inst *inst ATTRIBUTE_UNUSED,
697 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
698 {
699 int imm;
700
701 /* Rn */
702 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
703 /* simm (imm9 or imm7) */
704 imm = info->addr.offset.imm;
705 if (self->fields[0] == FLD_imm7
706 || info->qualifier == AARCH64_OPND_QLF_imm_tag)
707 /* scaled immediate in ld/st pair instructions.. */
708 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
709 insert_field (self->fields[0], code, imm, 0);
710 /* pre/post- index */
711 if (info->addr.writeback)
712 {
713 assert (inst->opcode->iclass != ldst_unscaled
714 && inst->opcode->iclass != ldstnapair_offs
715 && inst->opcode->iclass != ldstpair_off
716 && inst->opcode->iclass != ldst_unpriv);
717 assert (info->addr.preind != info->addr.postind);
718 if (info->addr.preind)
719 insert_field (self->fields[1], code, 1, 0);
720 }
721
722 return TRUE;
723 }
724
725 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
726 bfd_boolean
727 aarch64_ins_addr_simm10 (const aarch64_operand *self,
728 const aarch64_opnd_info *info,
729 aarch64_insn *code,
730 const aarch64_inst *inst ATTRIBUTE_UNUSED,
731 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
732 {
733 int imm;
734
735 /* Rn */
736 insert_field (self->fields[0], code, info->addr.base_regno, 0);
737 /* simm10 */
738 imm = info->addr.offset.imm >> 3;
739 insert_field (self->fields[1], code, imm >> 9, 0);
740 insert_field (self->fields[2], code, imm, 0);
741 /* writeback */
742 if (info->addr.writeback)
743 {
744 assert (info->addr.preind == 1 && info->addr.postind == 0);
745 insert_field (self->fields[3], code, 1, 0);
746 }
747 return TRUE;
748 }
749
750 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
751 bfd_boolean
752 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
753 const aarch64_opnd_info *info,
754 aarch64_insn *code,
755 const aarch64_inst *inst ATTRIBUTE_UNUSED,
756 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
757 {
758 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
759
760 /* Rn */
761 insert_field (self->fields[0], code, info->addr.base_regno, 0);
762 /* uimm12 */
763 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
764 return TRUE;
765 }
766
767 /* Encode the address operand for e.g.
768 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
769 bfd_boolean
770 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
771 const aarch64_opnd_info *info, aarch64_insn *code,
772 const aarch64_inst *inst ATTRIBUTE_UNUSED,
773 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
774 {
775 /* Rn */
776 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
777 /* Rm | #<amount> */
778 if (info->addr.offset.is_reg)
779 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
780 else
781 insert_field (FLD_Rm, code, 0x1f, 0);
782 return TRUE;
783 }
784
785 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
786 bfd_boolean
787 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
788 const aarch64_opnd_info *info, aarch64_insn *code,
789 const aarch64_inst *inst ATTRIBUTE_UNUSED,
790 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
791 {
792 /* cond */
793 insert_field (FLD_cond, code, info->cond->value, 0);
794 return TRUE;
795 }
796
797 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
798 bfd_boolean
799 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
800 const aarch64_opnd_info *info, aarch64_insn *code,
801 const aarch64_inst *inst,
802 aarch64_operand_error *detail ATTRIBUTE_UNUSED)
803 {
804 /* If a system instruction check if we have any restrictions on which
805 registers it can use. */
806 if (inst->opcode->iclass == ic_system)
807 {
808 uint64_t opcode_flags
809 = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
810 uint32_t sysreg_flags
811 = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);
812
813 /* Check to see if it's read-only, else check if it's write only.
814 if it's both or unspecified don't care. */
815 if (opcode_flags == F_SYS_READ
816 && sysreg_flags
817 && sysreg_flags != F_REG_READ)
818 {
819 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
820 detail->error = _("specified register cannot be read from");
821 detail->index = info->idx;
822 detail->non_fatal = TRUE;
823 }
824 else if (opcode_flags == F_SYS_WRITE
825 && sysreg_flags
826 && sysreg_flags != F_REG_WRITE)
827 {
828 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
829 detail->error = _("specified register cannot be written to");
830 detail->index = info->idx;
831 detail->non_fatal = TRUE;
832 }
833 }
834 /* op0:op1:CRn:CRm:op2 */
835 insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
836 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
837 return TRUE;
838 }
839
840 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
841 bfd_boolean
842 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
843 const aarch64_opnd_info *info, aarch64_insn *code,
844 const aarch64_inst *inst ATTRIBUTE_UNUSED,
845 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
846 {
847 /* op1:op2 */
848 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
849 FLD_op2, FLD_op1);
850 return TRUE;
851 }
852
853 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
854 bfd_boolean
855 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
856 const aarch64_opnd_info *info, aarch64_insn *code,
857 const aarch64_inst *inst ATTRIBUTE_UNUSED,
858 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
859 {
860 /* op1:CRn:CRm:op2 */
861 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
862 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
863 return TRUE;
864 }
865
866 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
867
868 bfd_boolean
869 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
870 const aarch64_opnd_info *info, aarch64_insn *code,
871 const aarch64_inst *inst ATTRIBUTE_UNUSED,
872 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
873 {
874 /* CRm */
875 insert_field (FLD_CRm, code, info->barrier->value, 0);
876 return TRUE;
877 }
878
879 /* Encode the memory barrier option operand for DSB <option>nXS|#<imm>. */
880
881 bfd_boolean
882 aarch64_ins_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
883 const aarch64_opnd_info *info, aarch64_insn *code,
884 const aarch64_inst *inst ATTRIBUTE_UNUSED,
885 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
886 {
887 /* For the DSB nXS barrier variant: is a 5-bit unsigned immediate,
888 encoded in CRm<3:2>. */
889 aarch64_insn value = (info->barrier->value >> 2) - 4;
890 insert_field (FLD_CRm_dsb_nxs, code, value, 0);
891 return TRUE;
892 }
893
894 /* Encode the prefetch operation option operand for e.g.
895 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
896
897 bfd_boolean
898 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
899 const aarch64_opnd_info *info, aarch64_insn *code,
900 const aarch64_inst *inst ATTRIBUTE_UNUSED,
901 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
902 {
903 /* prfop in Rt */
904 insert_field (FLD_Rt, code, info->prfop->value, 0);
905 return TRUE;
906 }
907
908 /* Encode the hint number for instructions that alias HINT but take an
909 operand. */
910
911 bfd_boolean
912 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
913 const aarch64_opnd_info *info, aarch64_insn *code,
914 const aarch64_inst *inst ATTRIBUTE_UNUSED,
915 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
916 {
917 /* CRm:op2. */
918 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
919 return TRUE;
920 }
921
922 /* Encode the extended register operand for e.g.
923 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
924 bfd_boolean
925 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
926 const aarch64_opnd_info *info, aarch64_insn *code,
927 const aarch64_inst *inst ATTRIBUTE_UNUSED,
928 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
929 {
930 enum aarch64_modifier_kind kind;
931
932 /* Rm */
933 insert_field (FLD_Rm, code, info->reg.regno, 0);
934 /* option */
935 kind = info->shifter.kind;
936 if (kind == AARCH64_MOD_LSL)
937 kind = info->qualifier == AARCH64_OPND_QLF_W
938 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
939 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
940 /* imm3 */
941 insert_field (FLD_imm3, code, info->shifter.amount, 0);
942
943 return TRUE;
944 }
945
946 /* Encode the shifted register operand for e.g.
947 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
948 bfd_boolean
949 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
950 const aarch64_opnd_info *info, aarch64_insn *code,
951 const aarch64_inst *inst ATTRIBUTE_UNUSED,
952 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
953 {
954 /* Rm */
955 insert_field (FLD_Rm, code, info->reg.regno, 0);
956 /* shift */
957 insert_field (FLD_shift, code,
958 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
959 /* imm6 */
960 insert_field (FLD_imm6, code, info->shifter.amount, 0);
961
962 return TRUE;
963 }
964
965 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
966 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
967 SELF's operand-dependent value. fields[0] specifies the field that
968 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
969 bfd_boolean
970 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
971 const aarch64_opnd_info *info,
972 aarch64_insn *code,
973 const aarch64_inst *inst ATTRIBUTE_UNUSED,
974 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
975 {
976 int factor = 1 + get_operand_specific_data (self);
977 insert_field (self->fields[0], code, info->addr.base_regno, 0);
978 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
979 return TRUE;
980 }
981
982 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
983 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
984 SELF's operand-dependent value. fields[0] specifies the field that
985 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
986 bfd_boolean
987 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
988 const aarch64_opnd_info *info,
989 aarch64_insn *code,
990 const aarch64_inst *inst ATTRIBUTE_UNUSED,
991 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
992 {
993 int factor = 1 + get_operand_specific_data (self);
994 insert_field (self->fields[0], code, info->addr.base_regno, 0);
995 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
996 return TRUE;
997 }
998
999 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1000 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1001 SELF's operand-dependent value. fields[0] specifies the field that
1002 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1003 and imm3 fields, with imm3 being the less-significant part. */
1004 bfd_boolean
1005 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
1006 const aarch64_opnd_info *info,
1007 aarch64_insn *code,
1008 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1009 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1010 {
1011 int factor = 1 + get_operand_specific_data (self);
1012 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1013 insert_fields (code, info->addr.offset.imm / factor, 0,
1014 2, FLD_imm3, FLD_SVE_imm6);
1015 return TRUE;
1016 }
1017
1018 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1019 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1020 value. fields[0] specifies the base register field. */
1021 bfd_boolean
1022 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
1023 const aarch64_opnd_info *info, aarch64_insn *code,
1024 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1025 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1026 {
1027 int factor = 1 << get_operand_specific_data (self);
1028 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1029 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
1030 return TRUE;
1031 }
1032
1033 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1034 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1035 value. fields[0] specifies the base register field. */
1036 bfd_boolean
1037 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
1038 const aarch64_opnd_info *info, aarch64_insn *code,
1039 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1040 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1041 {
1042 int factor = 1 << get_operand_specific_data (self);
1043 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1044 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
1045 return TRUE;
1046 }
1047
1048 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1049 is SELF's operand-dependent value. fields[0] specifies the base
1050 register field and fields[1] specifies the offset register field. */
1051 bfd_boolean
1052 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
1053 const aarch64_opnd_info *info, aarch64_insn *code,
1054 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1055 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1056 {
1057 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1058 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1059 return TRUE;
1060 }
1061
1062 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1063 <shift> is SELF's operand-dependent value. fields[0] specifies the
1064 base register field, fields[1] specifies the offset register field and
1065 fields[2] is a single-bit field that selects SXTW over UXTW. */
1066 bfd_boolean
1067 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
1068 const aarch64_opnd_info *info, aarch64_insn *code,
1069 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1070 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1071 {
1072 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1073 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1074 if (info->shifter.kind == AARCH64_MOD_UXTW)
1075 insert_field (self->fields[2], code, 0, 0);
1076 else
1077 insert_field (self->fields[2], code, 1, 0);
1078 return TRUE;
1079 }
1080
1081 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1082 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1083 fields[0] specifies the base register field. */
1084 bfd_boolean
1085 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
1086 const aarch64_opnd_info *info, aarch64_insn *code,
1087 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1088 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1089 {
1090 int factor = 1 << get_operand_specific_data (self);
1091 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1092 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
1093 return TRUE;
1094 }
1095
1096 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1097 where <modifier> is fixed by the instruction and where <msz> is a
1098 2-bit unsigned number. fields[0] specifies the base register field
1099 and fields[1] specifies the offset register field. */
1100 static bfd_boolean
1101 aarch64_ext_sve_addr_zz (const aarch64_operand *self,
1102 const aarch64_opnd_info *info, aarch64_insn *code,
1103 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1104 {
1105 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1106 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1107 insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
1108 return TRUE;
1109 }
1110
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors)
{
  /* The LSL/SXTW/UXTW forms share one field layout; the modifier itself
     is implied by the opcode, so the common helper only has to insert
     the registers and shift amount.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
1122
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  /* SXTW is implied by the opcode; only registers and shift amount are
     inserted by the shared helper.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
1135
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  /* UXTW is implied by the opcode; only registers and shift amount are
     inserted by the shared helper.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
1148
/* Encode an SVE ADD/SUB immediate.  The encoded value is 9 bits wide:
   bit 8 is the shift flag (LSL #8) and bits [7:0] are the immediate
   byte itself.  */
bfd_boolean
aarch64_ins_sve_aimm (const aarch64_operand *self,
		      const aarch64_opnd_info *info, aarch64_insn *code,
		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->shifter.amount == 8)
    /* An explicit "#<imm>, LSL #8": set the shift bit (256).  */
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    /* A non-zero multiple of 256 without an explicit shift: fold it
       into the shifted form "#<imm / 256>, LSL #8".  */
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    /* Plain unshifted 8-bit immediate.  */
    insert_all_fields (self, code, info->imm.value & 0xff);
  return TRUE;
}
1164
/* Encode an SVE CPY/DUP immediate.  */
bfd_boolean
aarch64_ins_sve_asimm (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst,
		       aarch64_operand_error *errors)
{
  /* The signed CPY/DUP immediate uses the same "8-bit value with optional
     LSL #8" layout as the ADD/SUB immediate, so delegate to that
     inserter.  */
  return aarch64_ins_sve_aimm (self, info, code, inst, errors);
}
1174
1175 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1176 array specifies which field to use for Zn. MM is encoded in the
1177 concatenation of imm5 and SVE_tszh, with imm5 being the less
1178 significant part. */
1179 bfd_boolean
1180 aarch64_ins_sve_index (const aarch64_operand *self,
1181 const aarch64_opnd_info *info, aarch64_insn *code,
1182 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1183 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1184 {
1185 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1186 insert_field (self->fields[0], code, info->reglane.regno, 0);
1187 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1188 2, FLD_imm5, FLD_SVE_tszh);
1189 return TRUE;
1190 }
1191
/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
bfd_boolean
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors)
{
  /* The MOV alias encodes its immediate exactly like any other logical
     (bitmask) immediate.  */
  return aarch64_ins_limm (self, info, code, inst, errors);
}
1201
1202 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1203 and where MM occupies the most-significant part. The operand-dependent
1204 value specifies the number of bits in Zn. */
1205 bfd_boolean
1206 aarch64_ins_sve_quad_index (const aarch64_operand *self,
1207 const aarch64_opnd_info *info, aarch64_insn *code,
1208 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1209 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1210 {
1211 unsigned int reg_bits = get_operand_specific_data (self);
1212 assert (info->reglane.regno < (1U << reg_bits));
1213 unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1214 insert_all_fields (self, code, val);
1215 return TRUE;
1216 }
1217
/* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
   to use for Zn.  */
bfd_boolean
aarch64_ins_sve_reglist (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Only the first register of the list is encoded; the list length is
     implied by the opcode.  */
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  return TRUE;
}
1229
1230 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1231 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1232 field. */
1233 bfd_boolean
1234 aarch64_ins_sve_scale (const aarch64_operand *self,
1235 const aarch64_opnd_info *info, aarch64_insn *code,
1236 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1237 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1238 {
1239 insert_all_fields (self, code, info->imm.value);
1240 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
1241 return TRUE;
1242 }
1243
1244 /* Encode an SVE shift left immediate. */
1245 bfd_boolean
1246 aarch64_ins_sve_shlimm (const aarch64_operand *self,
1247 const aarch64_opnd_info *info, aarch64_insn *code,
1248 const aarch64_inst *inst,
1249 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1250 {
1251 const aarch64_opnd_info *prev_operand;
1252 unsigned int esize;
1253
1254 assert (info->idx > 0);
1255 prev_operand = &inst->operands[info->idx - 1];
1256 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1257 insert_all_fields (self, code, 8 * esize + info->imm.value);
1258 return TRUE;
1259 }
1260
1261 /* Encode an SVE shift right immediate. */
1262 bfd_boolean
1263 aarch64_ins_sve_shrimm (const aarch64_operand *self,
1264 const aarch64_opnd_info *info, aarch64_insn *code,
1265 const aarch64_inst *inst,
1266 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1267 {
1268 const aarch64_opnd_info *prev_operand;
1269 unsigned int esize;
1270
1271 unsigned int opnd_backshift = get_operand_specific_data (self);
1272 assert (info->idx >= (int)opnd_backshift);
1273 prev_operand = &inst->operands[info->idx - opnd_backshift];
1274 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1275 insert_all_fields (self, code, 16 * esize - info->imm.value);
1276 return TRUE;
1277 }
1278
1279 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1280 The fields array specifies which field to use. */
1281 bfd_boolean
1282 aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1283 const aarch64_opnd_info *info,
1284 aarch64_insn *code,
1285 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1286 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1287 {
1288 if (info->imm.value == 0x3f000000)
1289 insert_field (self->fields[0], code, 0, 0);
1290 else
1291 insert_field (self->fields[0], code, 1, 0);
1292 return TRUE;
1293 }
1294
1295 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1296 The fields array specifies which field to use. */
1297 bfd_boolean
1298 aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1299 const aarch64_opnd_info *info,
1300 aarch64_insn *code,
1301 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1302 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1303 {
1304 if (info->imm.value == 0x3f000000)
1305 insert_field (self->fields[0], code, 0, 0);
1306 else
1307 insert_field (self->fields[0], code, 1, 0);
1308 return TRUE;
1309 }
1310
1311 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1312 The fields array specifies which field to use. */
1313 bfd_boolean
1314 aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1315 const aarch64_opnd_info *info,
1316 aarch64_insn *code,
1317 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1318 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1319 {
1320 if (info->imm.value == 0)
1321 insert_field (self->fields[0], code, 0, 0);
1322 else
1323 insert_field (self->fields[0], code, 1, 0);
1324 return TRUE;
1325 }
1326
1327 /* Miscellaneous encoding functions. */
1328
1329 /* Encode size[0], i.e. bit 22, for
1330 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1331
1332 static void
1333 encode_asimd_fcvt (aarch64_inst *inst)
1334 {
1335 aarch64_insn value;
1336 aarch64_field field = {0, 0};
1337 enum aarch64_opnd_qualifier qualifier;
1338
1339 switch (inst->opcode->op)
1340 {
1341 case OP_FCVTN:
1342 case OP_FCVTN2:
1343 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1344 qualifier = inst->operands[1].qualifier;
1345 break;
1346 case OP_FCVTL:
1347 case OP_FCVTL2:
1348 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1349 qualifier = inst->operands[0].qualifier;
1350 break;
1351 default:
1352 assert (0);
1353 }
1354 assert (qualifier == AARCH64_OPND_QLF_V_4S
1355 || qualifier == AARCH64_OPND_QLF_V_2D);
1356 value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
1357 gen_sub_field (FLD_size, 0, 1, &field);
1358 insert_field_2 (&field, &inst->value, value, 0);
1359 }
1360
1361 /* Encode size[0], i.e. bit 22, for
1362 e.g. FCVTXN <Vb><d>, <Va><n>. */
1363
1364 static void
1365 encode_asisd_fcvtxn (aarch64_inst *inst)
1366 {
1367 aarch64_insn val = 1;
1368 aarch64_field field = {0, 0};
1369 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1370 gen_sub_field (FLD_size, 0, 1, &field);
1371 insert_field_2 (&field, &inst->value, val, 0);
1372 }
1373
1374 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1375 static void
1376 encode_fcvt (aarch64_inst *inst)
1377 {
1378 aarch64_insn val;
1379 const aarch64_field field = {15, 2};
1380
1381 /* opc dstsize */
1382 switch (inst->operands[0].qualifier)
1383 {
1384 case AARCH64_OPND_QLF_S_S: val = 0; break;
1385 case AARCH64_OPND_QLF_S_D: val = 1; break;
1386 case AARCH64_OPND_QLF_S_H: val = 3; break;
1387 default: abort ();
1388 }
1389 insert_field_2 (&field, &inst->value, val, 0);
1390
1391 return;
1392 }
1393
1394 /* Return the index in qualifiers_list that INST is using. Should only
1395 be called once the qualifiers are known to be valid. */
1396
1397 static int
1398 aarch64_get_variant (struct aarch64_inst *inst)
1399 {
1400 int i, nops, variant;
1401
1402 nops = aarch64_num_of_operands (inst->opcode);
1403 for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
1404 {
1405 for (i = 0; i < nops; ++i)
1406 if (inst->opcode->qualifiers_list[variant][i]
1407 != inst->operands[i].qualifier)
1408 break;
1409 if (i == nops)
1410 return variant;
1411 }
1412 abort ();
1413 }
1414
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  Mostly this means the FCVT family (whose size bits depend on
   operand arrangement) and SVE MOV-style aliases, which duplicate one
   register field of the real instruction into another.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      /* Scalar FCVT: encode the destination-size 'opc' field.  */
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      /* Vector narrowing/widening convert: encode size[0].  */
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      /* Scalar FCVTXN: encode size[0].  */
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
		     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      /* Nothing extra to encode for the indexed form.  */
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}
1481
1482 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
1483 static void
1484 encode_sizeq (aarch64_inst *inst)
1485 {
1486 aarch64_insn sizeq;
1487 enum aarch64_field_kind kind;
1488 int idx;
1489
1490 /* Get the index of the operand whose information we are going to use
1491 to encode the size and Q fields.
1492 This is deduced from the possible valid qualifier lists. */
1493 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1494 DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
1495 aarch64_get_qualifier_name (inst->operands[idx].qualifier));
1496 sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
1497 /* Q */
1498 insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
1499 /* size */
1500 if (inst->opcode->iclass == asisdlse
1501 || inst->opcode->iclass == asisdlsep
1502 || inst->opcode->iclass == asisdlso
1503 || inst->opcode->iclass == asisdlsop)
1504 kind = FLD_vldst_size;
1505 else
1506 kind = FLD_size;
1507 insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
1508 }
1509
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has the enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  /* F_SF: the sf field is 1 for X/SP qualifiers, 0 for W.  F_N, when
     also present, mirrors the same value into the N field.  */
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  /* F_LSE_SZ: same W/X selection as F_SF, but encoded in the LSE size
     field.  */
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  /* F_FPTYPE: the floating-point 'type' field from a scalar FP
     qualifier.  */
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: assert (0);
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  /* F_SSIZE: scalar size field from a scalar (S_B..S_Q) qualifier.  */
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  /* F_T: vector arrangement encoded in Q and a one-hot sub-field of
     imm5, per the table below.  */
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  /* F_GPRSIZE_IN_Q: the W/X choice of a GPR operand lives in the Q bit.  */
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be a integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* F_LDS_SIZE: load-signed size: opc<0> is the inverse of the W/X
     standard value of the destination register.  */
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
1635
/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  int variant = 0;
  switch (inst->opcode->iclass)
    {
    case sve_cpy:
      /* Variant index is split across the M and size fields.  */
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
    case sve_shift_tsz_hsd:
    case sve_shift_tsz_bhsd:
      /* For indices and shift amounts, the variant is encoded as
	 part of the immediate.  */
      break;

    case sve_limm:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
	 and depend on the immediate.  They don't have a separate
	 encoding.  */
      break;

    case sve_misc:
      /* sve_misc instructions have only a single variant.  */
      break;

    case sve_movprfx:
      /* As sve_cpy, but the M bit sits at position 16.  */
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      /* Single-bit M field selects between the two variants.  */
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      /* Variant index maps directly onto the size field.  */
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      /* Qualifier list starts at H, so bias the size value by one.  */
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_bh:
    case sve_size_sd:
      /* Two-way choice held in the single-bit sz field.  */
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_sd2:
      insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd2:
      insert_field (FLD_SVE_size, &inst->value,
		    aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_tsz_bhs:
      /* One-hot variant encoding split across tszl and sz.  */
      insert_fields (&inst->value,
		     (1 << aarch64_get_variant (inst)),
		     0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
      break;

    case sve_size_13:
      /* Variant 0 maps to size 1; the next valid size value is 3, so
	 variant 1 (which would give 2) is remapped to 3 — presumably
	 size 2 is not a valid form for this class; confirm against the
	 opcode table.  */
      variant = aarch64_get_variant (inst) + 1;
      if (variant == 2)
	  variant = 3;
      insert_field (FLD_size, &inst->value, variant, 0);
      break;

    default:
      break;
    }
}
1720
/* Converters converting an alias opcode instruction to its real form.  */

/* ROR <Wd>, <Ws>, #<shift>
   is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  /* Move #<shift> up to operand 3 and duplicate <Ws> into operand 2.
     Copies are done highest index first so no operand is overwritten
     before it has been read.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}
1732
1733 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1734 is equivalent to:
1735 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1736 static void
1737 convert_xtl_to_shll (aarch64_inst *inst)
1738 {
1739 inst->operands[2].qualifier = inst->operands[1].qualifier;
1740 inst->operands[2].imm.value = 0;
1741 }
1742
1743 /* Convert
1744 LSR <Xd>, <Xn>, #<shift>
1745 to
1746 UBFM <Xd>, <Xn>, #<shift>, #63. */
1747 static void
1748 convert_sr_to_bfm (aarch64_inst *inst)
1749 {
1750 inst->operands[3].imm.value =
1751 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1752 }
1753
/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
     is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.
     Duplicate the source register into the second source operand.  */
  copy_operand_info (inst, 2, 1);
}
1763
1764 /* When <imms> >= <immr>, the instruction written:
1765 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1766 is equivalent to:
1767 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1768
1769 static void
1770 convert_bfx_to_bfm (aarch64_inst *inst)
1771 {
1772 int64_t lsb, width;
1773
1774 /* Convert the operand. */
1775 lsb = inst->operands[2].imm.value;
1776 width = inst->operands[3].imm.value;
1777 inst->operands[2].imm.value = lsb;
1778 inst->operands[3].imm.value = lsb + width - 1;
1779 }
1780
1781 /* When <imms> < <immr>, the instruction written:
1782 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1783 is equivalent to:
1784 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1785
1786 static void
1787 convert_bfi_to_bfm (aarch64_inst *inst)
1788 {
1789 int64_t lsb, width;
1790
1791 /* Convert the operand. */
1792 lsb = inst->operands[2].imm.value;
1793 width = inst->operands[3].imm.value;
1794 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1795 {
1796 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1797 inst->operands[3].imm.value = width - 1;
1798 }
1799 else
1800 {
1801 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1802 inst->operands[3].imm.value = width - 1;
1803 }
1804 }
1805
1806 /* The instruction written:
1807 BFC <Xd>, #<lsb>, #<width>
1808 is equivalent to:
1809 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1810
1811 static void
1812 convert_bfc_to_bfm (aarch64_inst *inst)
1813 {
1814 int64_t lsb, width;
1815
1816 /* Insert XZR. */
1817 copy_operand_info (inst, 3, 2);
1818 copy_operand_info (inst, 2, 1);
1819 copy_operand_info (inst, 1, 0);
1820 inst->operands[1].reg.regno = 0x1f;
1821
1822 /* Convert the immediate operand. */
1823 lsb = inst->operands[2].imm.value;
1824 width = inst->operands[3].imm.value;
1825 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1826 {
1827 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1828 inst->operands[3].imm.value = width - 1;
1829 }
1830 else
1831 {
1832 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1833 inst->operands[3].imm.value = width - 1;
1834 }
1835 }
1836
1837 /* The instruction written:
1838 LSL <Xd>, <Xn>, #<shift>
1839 is equivalent to:
1840 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1841
1842 static void
1843 convert_lsl_to_ubfm (aarch64_inst *inst)
1844 {
1845 int64_t shift = inst->operands[2].imm.value;
1846
1847 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1848 {
1849 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1850 inst->operands[3].imm.value = 31 - shift;
1851 }
1852 else
1853 {
1854 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1855 inst->operands[3].imm.value = 63 - shift;
1856 }
1857 }
1858
/* CINC <Wd>, <Wn>, <cond>
   is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */

static void
convert_to_csel (aarch64_inst *inst)
{
  /* Move <cond> up to operand 3 and duplicate <Wn> into operand 2,
     highest index first so nothing is overwritten before it is read.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  /* The alias names the condition under which the increment happens;
     the real instruction uses the inverse.  */
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1870
/* CSET <Wd>, <cond>
   is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  /* Move <cond> up to operand 3 and fill operands 1 and 2 with copies
     of operand 0 rewritten to WZR/XZR (regno 31).  Copies go highest
     index first to avoid clobbering.  */
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  /* As with CINC, the real instruction uses the inverted condition.  */
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1885
/* MOV <Wd>, #<imm>
   is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>.  */

static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value;

  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      /* MOVZ form: use the immediate as-is.  */
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      /* MOVN form: the encoded half-word is the bitwise NOT of the
	 requested immediate.  */
      value = ~inst->operands[1].imm.value;
      break;
    default:
      assert (0);
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);
  /* Reduce the immediate to the 16-bit chunk selected by SHIFT_AMOUNT
     and record the shift on the operand.  */
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
}
1919
/* MOV <Wd>, #<imm>
   is equivalent to:
     ORR <Wd>, WZR, #<imm>.  */

static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  /* Move the immediate from slot 1 to slot 2, then turn slot 1 into the
     WZR/XZR source register (register number 0x1f) and clear its skip
     flag so the inserter encodes it.  */
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
}
1931
1932 /* Some alias opcodes are assembled by being converted to their real-form. */
1933
1934 static void
1935 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1936 {
1937 const aarch64_opcode *alias = inst->opcode;
1938
1939 if ((alias->flags & F_CONV) == 0)
1940 goto convert_to_real_return;
1941
1942 switch (alias->op)
1943 {
1944 case OP_ASR_IMM:
1945 case OP_LSR_IMM:
1946 convert_sr_to_bfm (inst);
1947 break;
1948 case OP_LSL_IMM:
1949 convert_lsl_to_ubfm (inst);
1950 break;
1951 case OP_CINC:
1952 case OP_CINV:
1953 case OP_CNEG:
1954 convert_to_csel (inst);
1955 break;
1956 case OP_CSET:
1957 case OP_CSETM:
1958 convert_cset_to_csinc (inst);
1959 break;
1960 case OP_UBFX:
1961 case OP_BFXIL:
1962 case OP_SBFX:
1963 convert_bfx_to_bfm (inst);
1964 break;
1965 case OP_SBFIZ:
1966 case OP_BFI:
1967 case OP_UBFIZ:
1968 convert_bfi_to_bfm (inst);
1969 break;
1970 case OP_BFC:
1971 convert_bfc_to_bfm (inst);
1972 break;
1973 case OP_MOV_V:
1974 convert_mov_to_orr (inst);
1975 break;
1976 case OP_MOV_IMM_WIDE:
1977 case OP_MOV_IMM_WIDEN:
1978 convert_mov_to_movewide (inst);
1979 break;
1980 case OP_MOV_IMM_LOG:
1981 convert_mov_to_movebitmask (inst);
1982 break;
1983 case OP_ROR_IMM:
1984 convert_ror_to_extr (inst);
1985 break;
1986 case OP_SXTL:
1987 case OP_SXTL2:
1988 case OP_UXTL:
1989 case OP_UXTL2:
1990 convert_xtl_to_shll (inst);
1991 break;
1992 default:
1993 break;
1994 }
1995
1996 convert_to_real_return:
1997 aarch64_replace_opcode (inst, real);
1998 }
1999
/* Encode *INST_ORI of the opcode code OPCODE.
   Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.
   MISMATCH_DETAIL receives diagnostic information on failure;
   INSN_SEQUENCE carries cross-instruction constraint state.
   Return TRUE on success, FALSE (with *MISMATCH_DETAIL filled in) on
   failure.  */

bfd_boolean
aarch64_opcode_encode (const aarch64_opcode *opcode,
		       const aarch64_inst *inst_ori, aarch64_insn *code,
		       aarch64_opnd_qualifier_t *qlf_seq,
		       aarch64_operand_error *mismatch_detail,
		       aarch64_instr_sequence* insn_sequence)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return 0;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.
     NOTE(review): each iteration writes to the same location *QLF_SEQ, so
     after the loop it holds only the last operand's qualifier rather than
     the full sequence -- confirm against callers whether this is
     intentional.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
	*qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of real opcode
     and the encoding will be carried out using the rules for the aliased
     opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
		   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
      opcode = aliased;
    }

  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      /* AARCH64_OPND_NIL marks the end of the operand list.  */
      if (type == AARCH64_OPND_NIL)
	break;
      /* Operands marked `skip' (e.g. the WZR inserted by an alias
	 conversion that has no field of its own) are not encoded.  */
      if (info->skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd)
	  && !aarch64_insert_operand (opnd, info, &inst->value, inst,
				      mismatch_detail))
	return FALSE;
    }

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

  /* Possibly use the instruction class to encode the chosen qualifier
     variant.  */
  aarch64_encode_variant_using_iclass (inst);

  /* Run a verifier if the instruction has one set.  */
  if (opcode->verifier)
    {
      enum err_type result = opcode->verifier (inst, *code, 0, TRUE,
					       mismatch_detail, insn_sequence);
      switch (result)
	{
	/* Any undefined, unpredictable or not-yet-implemented outcome
	   fails the encoding; warnings and success fall through.  */
	case ERR_UND:
	case ERR_UNP:
	case ERR_NYI:
	  return FALSE;
	default:
	  break;
	}
    }

  /* Always run constrain verifiers, this is needed because constrains need to
     maintain a global state.  Regardless if the instruction has the flag set
     or not.  */
  enum err_type result = verify_constraints (inst, *code, 0, TRUE,
					     mismatch_detail, insn_sequence);
  switch (result)
    {
    case ERR_UND:
    case ERR_UNP:
    case ERR_NYI:
      return FALSE;
    default:
      break;
    }


 encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  *code = inst->value;

  return TRUE;
}