]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - opcodes/aarch64-asm.c
[AArch64] Add dot product support for AArch64 to binutils
[thirdparty/binutils-gdb.git] / opcodes / aarch64-asm.c
1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
25
26 /* Utilities. */
27
28 /* The unnamed arguments consist of the number of fields and information about
29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
30 the base mask of the opcode.
31
32 N.B. the fields are required to be in such an order than the least signficant
33 field for VALUE comes the first, e.g. the <index> in
34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
35 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
36 the order of M, L, H. */
37
38 static inline void
39 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
40 {
41 uint32_t num;
42 const aarch64_field *field;
43 enum aarch64_field_kind kind;
44 va_list va;
45
46 va_start (va, mask);
47 num = va_arg (va, uint32_t);
48 assert (num <= 5);
49 while (num--)
50 {
51 kind = va_arg (va, enum aarch64_field_kind);
52 field = &fields[kind];
53 insert_field (kind, code, value, mask);
54 value >>= field->width;
55 }
56 va_end (va);
57 }
58
59 /* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
61
62 static void
63 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
64 aarch64_insn value)
65 {
66 unsigned int i;
67 enum aarch64_field_kind kind;
68
69 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
70 if (self->fields[i] != FLD_NIL)
71 {
72 kind = self->fields[i];
73 insert_field (kind, code, value, 0);
74 value >>= fields[kind].width;
75 }
76 }
77
78 /* Operand inserters. */
79
80 /* Insert register number. */
81 const char *
82 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
83 aarch64_insn *code,
84 const aarch64_inst *inst ATTRIBUTE_UNUSED)
85 {
86 insert_field (self->fields[0], code, info->reg.regno, 0);
87 return NULL;
88 }
89
/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].
   The index encoding depends on the instruction class and qualifier.  */
const char *
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      /* POS is 0 for an S_B qualifier, 1 for S_H, and so on: the number
	 of low bits of imm5/imm4 occupied by the element-size marker.  */
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D
	     The low set bit marks the element size; the index sits above
	     it.  */
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      unsigned reglane_index = info->reglane.index;
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_B:
	  /* L:H */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	default:
	  assert (0);
	}
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
	 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
	/* Complex operand takes two elements.  */
	reglane_index *= 2;

      /* Insert the (possibly doubled) index into the H/L/M bits that the
	 qualifier makes available; assert the index fits first.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* H:L:M */
	  assert (reglane_index < 8);
	  insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* H:L */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  assert (reglane_index < 2);
	  insert_field (FLD_H, code, reglane_index, 0);
	  break;
	default:
	  assert (0);
	}
    }
  return NULL;
}
171
172 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
173 const char *
174 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
175 aarch64_insn *code,
176 const aarch64_inst *inst ATTRIBUTE_UNUSED)
177 {
178 /* R */
179 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
180 /* len */
181 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
182 return NULL;
183 }
184
185 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
186 in AdvSIMD load/store instructions. */
187 const char *
188 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
189 const aarch64_opnd_info *info, aarch64_insn *code,
190 const aarch64_inst *inst)
191 {
192 aarch64_insn value = 0;
193 /* Number of elements in each structure to be loaded/stored. */
194 unsigned num = get_opcode_dependent_value (inst->opcode);
195
196 /* Rt */
197 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
198 /* opcode */
199 switch (num)
200 {
201 case 1:
202 switch (info->reglist.num_regs)
203 {
204 case 1: value = 0x7; break;
205 case 2: value = 0xa; break;
206 case 3: value = 0x6; break;
207 case 4: value = 0x2; break;
208 default: assert (0);
209 }
210 break;
211 case 2:
212 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
213 break;
214 case 3:
215 value = 0x4;
216 break;
217 case 4:
218 value = 0x0;
219 break;
220 default:
221 assert (0);
222 }
223 insert_field (FLD_opcode, code, value, 0);
224
225 return NULL;
226 }
227
228 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
229 single structure to all lanes instructions. */
230 const char *
231 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
232 const aarch64_opnd_info *info, aarch64_insn *code,
233 const aarch64_inst *inst)
234 {
235 aarch64_insn value;
236 /* The opcode dependent area stores the number of elements in
237 each structure to be loaded/stored. */
238 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
239
240 /* Rt */
241 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
242 /* S */
243 value = (aarch64_insn) 0;
244 if (is_ld1r && info->reglist.num_regs == 2)
245 /* OP_LD1R does not have alternating variant, but have "two consecutive"
246 instead. */
247 value = (aarch64_insn) 1;
248 insert_field (FLD_S, code, value, 0);
249
250 return NULL;
251 }
252
/* Insert the Q, opcode<2:1>, S, size and Rt fields for a register element
   list operand, e.g. Vt in AdvSIMD load/store single-element instructions.
   The element index is spread across Q:S:size according to the element
   size given by the qualifier.  */
const char *
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  /* Element-list operands always carry an index.  */
  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  The shift applied to the
     index grows with the element size, freeing low bits for the size
     marker.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q"; size<0> is forced to 1.  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      assert (0);
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  /* opcode<2:1> lives in a 2-bit sub-field of the asisdlso opcode.  */
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return NULL;
}
300
/* Insert the fields immh:immb and/or Q for e.g. the shift immediate in
     SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.
   The shift amount and the element size are jointly encoded in
   immh:immb; right shifts and left shifts use different formulas.  */
const char *
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst)
{
  /* VAL starts as the standard value of the qualifier; bit 0 is the Q
     bit for the vector form, the rest selects the element size.  */
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      /* Strip the Q bit so VAL now selects only the element size.  */
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return NULL;
}
355
356 /* Insert fields for e.g. the immediate operands in
357 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
358 const char *
359 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
360 aarch64_insn *code,
361 const aarch64_inst *inst ATTRIBUTE_UNUSED)
362 {
363 int64_t imm;
364
365 imm = info->imm.value;
366 if (operand_need_shift_by_two (self))
367 imm >>= 2;
368 insert_all_fields (self, code, imm);
369 return NULL;
370 }
371
372 /* Insert immediate and its shift amount for e.g. the last operand in
373 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
374 const char *
375 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
376 aarch64_insn *code, const aarch64_inst *inst)
377 {
378 /* imm16 */
379 aarch64_ins_imm (self, info, code, inst);
380 /* hw */
381 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
382 return NULL;
383 }
384
/* Insert the cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand
   in MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.
   NOTE(review): INST is marked ATTRIBUTE_UNUSED but is read below; the
   attribute merely permits non-use and is harmless, though misleading.  */
const char *
aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
				  const aarch64_opnd_info *info,
				  aarch64_insn *code,
				  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  uint64_t imm = info->imm.value;
  enum aarch64_modifier_kind kind = info->shifter.kind;
  int amount = info->shifter.amount;
  aarch64_field field = {0, 0};

  /* a:b:c:d:e:f:g:h */
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    {
      /* Either MOVI <Dd>, #<imm>
	 or     MOVI <Vd>.2D, #<imm>.
	 <imm> is a 64-bit immediate
	 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
	 encoded in "a:b:c:d:e:f:g:h".  Shrink the replicated 64-bit
	 pattern back down to its 8 generating bits.  */
      imm = aarch64_shrink_expanded_imm8 (imm);
      /* A negative result would mean the immediate was not expandable;
	 the constraint checks should have rejected it already.  */
      assert ((int)imm >= 0);
    }
  insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);

  if (kind == AARCH64_MOD_NONE)
    return NULL;

  /* shift amount partially in cmode */
  assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
  if (kind == AARCH64_MOD_LSL)
    {
      /* AARCH64_MOD_LSL: shift zeros.  */
      int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
      assert (esize == 4 || esize == 2 || esize == 1);
      /* For 8-bit move immediate, the optional LSL #0 does not require
	 encoding.  */
      if (esize == 1)
	return NULL;
      /* The shift is a multiple of 8; encode amount/8 in a cmode
	 sub-field whose width depends on the element size.  */
      amount >>= 3;
      if (esize == 4)
	gen_sub_field (FLD_cmode, 1, 2, &field);	/* per word */
      else
	gen_sub_field (FLD_cmode, 1, 1, &field);	/* per halfword */
    }
  else
    {
      /* AARCH64_MOD_MSL: shift ones.  Only amounts 0/16 exist here,
	 encoded as amount/16 in cmode<0>.  */
      amount >>= 4;
      gen_sub_field (FLD_cmode, 0, 1, &field);	/* per word */
    }
  insert_field_2 (&field, code, amount, 0);

  return NULL;
}
442
443 /* Insert fields for an 8-bit floating-point immediate. */
444 const char *
445 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
446 aarch64_insn *code,
447 const aarch64_inst *inst ATTRIBUTE_UNUSED)
448 {
449 insert_all_fields (self, code, info->imm.value);
450 return NULL;
451 }
452
453 /* Insert 1-bit rotation immediate (#90 or #270). */
454 const char *
455 aarch64_ins_imm_rotate1 (const aarch64_operand *self,
456 const aarch64_opnd_info *info,
457 aarch64_insn *code, const aarch64_inst *inst)
458 {
459 uint64_t rot = (info->imm.value - 90) / 180;
460 assert (rot < 2U);
461 insert_field (self->fields[0], code, rot, inst->opcode->mask);
462 return NULL;
463 }
464
465 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
466 const char *
467 aarch64_ins_imm_rotate2 (const aarch64_operand *self,
468 const aarch64_opnd_info *info,
469 aarch64_insn *code, const aarch64_inst *inst)
470 {
471 uint64_t rot = info->imm.value / 90;
472 assert (rot < 4U);
473 insert_field (self->fields[0], code, rot, inst->opcode->mask);
474 return NULL;
475 }
476
477 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
478 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
479 const char *
480 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
481 aarch64_insn *code,
482 const aarch64_inst *inst ATTRIBUTE_UNUSED)
483 {
484 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
485 return NULL;
486 }
487
488 /* Insert arithmetic immediate for e.g. the last operand in
489 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
490 const char *
491 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
492 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
493 {
494 /* shift */
495 aarch64_insn value = info->shifter.amount ? 1 : 0;
496 insert_field (self->fields[0], code, value, 0);
497 /* imm12 (unsigned) */
498 insert_field (self->fields[1], code, info->imm.value, 0);
499 return NULL;
500 }
501
502 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
503 the operand should be inverted before encoding. */
504 static const char *
505 aarch64_ins_limm_1 (const aarch64_operand *self,
506 const aarch64_opnd_info *info, aarch64_insn *code,
507 const aarch64_inst *inst, bfd_boolean invert_p)
508 {
509 aarch64_insn value;
510 uint64_t imm = info->imm.value;
511 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
512
513 if (invert_p)
514 imm = ~imm;
515 /* The constraint check should have guaranteed this wouldn't happen. */
516 assert (aarch64_logical_immediate_p (imm, esize, &value));
517
518 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
519 self->fields[0]);
520 return NULL;
521 }
522
523 /* Insert logical/bitmask immediate for e.g. the last operand in
524 ORR <Wd|WSP>, <Wn>, #<imm>. */
525 const char *
526 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
527 aarch64_insn *code, const aarch64_inst *inst)
528 {
529 return aarch64_ins_limm_1 (self, info, code, inst,
530 inst->opcode->op == OP_BIC);
531 }
532
533 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
534 const char *
535 aarch64_ins_inv_limm (const aarch64_operand *self,
536 const aarch64_opnd_info *info, aarch64_insn *code,
537 const aarch64_inst *inst)
538 {
539 return aarch64_ins_limm_1 (self, info, code, inst, TRUE);
540 }
541
/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  The size encoding depends on
   the instruction class.  */
const char *
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
		aarch64_insn *code, const aarch64_inst *inst)
{
  aarch64_insn value = 0;

  /* Ft is always the first operand of these instructions.  */
  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size: pair/literal forms use a 2-bit encoding of the register
	 size (S=0, D=1, Q=2).  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_Q: value = 2; break;
	default: assert (0);
	}
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size: other forms take the qualifier's standard value,
	 split across the opc1 and size fields.  */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }

  return NULL;
}
578
579 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
580 const char *
581 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
582 const aarch64_opnd_info *info, aarch64_insn *code,
583 const aarch64_inst *inst ATTRIBUTE_UNUSED)
584 {
585 /* Rn */
586 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
587 return NULL;
588 }
589
/* Encode the address operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
const char *
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    /* Non-byte accesses: S is set when a non-zero shift amount was
       given.  */
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
       S	<amount>
       0	[absent]
       1	#0
       Must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return NULL;
}
622
/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.
   Handles both the imm9 and the scaled imm7 (load/store pair) forms, and
   the pre/post-index writeback bit.  */
const char *
aarch64_ins_addr_simm (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7)
    /* scaled immediate in ld/st pair instructions: the stored value is
       the byte offset divided by the access size.  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post- index */
  if (info->addr.writeback)
    {
      /* These classes have no writeback forms at all.  */
      assert (inst->opcode->iclass != ldst_unscaled
	      && inst->opcode->iclass != ldstnapair_offs
	      && inst->opcode->iclass != ldstpair_off
	      && inst->opcode->iclass != ldst_unpriv);
      /* Exactly one of pre-index and post-index must be chosen; only
	 pre-index needs an extra bit set in fields[1].  */
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
	insert_field (self->fields[1], code, 1, 0);
    }

  return NULL;
}
654
655 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
656 const char *
657 aarch64_ins_addr_simm10 (const aarch64_operand *self,
658 const aarch64_opnd_info *info,
659 aarch64_insn *code,
660 const aarch64_inst *inst ATTRIBUTE_UNUSED)
661 {
662 int imm;
663
664 /* Rn */
665 insert_field (self->fields[0], code, info->addr.base_regno, 0);
666 /* simm10 */
667 imm = info->addr.offset.imm >> 3;
668 insert_field (self->fields[1], code, imm >> 9, 0);
669 insert_field (self->fields[2], code, imm, 0);
670 /* writeback */
671 if (info->addr.writeback)
672 {
673 assert (info->addr.preind == 1 && info->addr.postind == 0);
674 insert_field (self->fields[3], code, 1, 0);
675 }
676 return NULL;
677 }
678
679 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
680 const char *
681 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
682 const aarch64_opnd_info *info,
683 aarch64_insn *code,
684 const aarch64_inst *inst ATTRIBUTE_UNUSED)
685 {
686 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
687
688 /* Rn */
689 insert_field (self->fields[0], code, info->addr.base_regno, 0);
690 /* uimm12 */
691 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
692 return NULL;
693 }
694
695 /* Encode the address operand for e.g.
696 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
697 const char *
698 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
699 const aarch64_opnd_info *info, aarch64_insn *code,
700 const aarch64_inst *inst ATTRIBUTE_UNUSED)
701 {
702 /* Rn */
703 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
704 /* Rm | #<amount> */
705 if (info->addr.offset.is_reg)
706 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
707 else
708 insert_field (FLD_Rm, code, 0x1f, 0);
709 return NULL;
710 }
711
712 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
713 const char *
714 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
715 const aarch64_opnd_info *info, aarch64_insn *code,
716 const aarch64_inst *inst ATTRIBUTE_UNUSED)
717 {
718 /* cond */
719 insert_field (FLD_cond, code, info->cond->value, 0);
720 return NULL;
721 }
722
723 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
724 const char *
725 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
726 const aarch64_opnd_info *info, aarch64_insn *code,
727 const aarch64_inst *inst ATTRIBUTE_UNUSED)
728 {
729 /* op0:op1:CRn:CRm:op2 */
730 insert_fields (code, info->sysreg, inst->opcode->mask, 5,
731 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
732 return NULL;
733 }
734
735 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
736 const char *
737 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
738 const aarch64_opnd_info *info, aarch64_insn *code,
739 const aarch64_inst *inst ATTRIBUTE_UNUSED)
740 {
741 /* op1:op2 */
742 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
743 FLD_op2, FLD_op1);
744 return NULL;
745 }
746
747 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
748 const char *
749 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
750 const aarch64_opnd_info *info, aarch64_insn *code,
751 const aarch64_inst *inst ATTRIBUTE_UNUSED)
752 {
753 /* op1:CRn:CRm:op2 */
754 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
755 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
756 return NULL;
757 }
758
759 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
760
761 const char *
762 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
763 const aarch64_opnd_info *info, aarch64_insn *code,
764 const aarch64_inst *inst ATTRIBUTE_UNUSED)
765 {
766 /* CRm */
767 insert_field (FLD_CRm, code, info->barrier->value, 0);
768 return NULL;
769 }
770
771 /* Encode the prefetch operation option operand for e.g.
772 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
773
774 const char *
775 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
776 const aarch64_opnd_info *info, aarch64_insn *code,
777 const aarch64_inst *inst ATTRIBUTE_UNUSED)
778 {
779 /* prfop in Rt */
780 insert_field (FLD_Rt, code, info->prfop->value, 0);
781 return NULL;
782 }
783
784 /* Encode the hint number for instructions that alias HINT but take an
785 operand. */
786
787 const char *
788 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
789 const aarch64_opnd_info *info, aarch64_insn *code,
790 const aarch64_inst *inst ATTRIBUTE_UNUSED)
791 {
792 /* CRm:op2. */
793 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
794 return NULL;
795 }
796
797 /* Encode the extended register operand for e.g.
798 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
799 const char *
800 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
801 const aarch64_opnd_info *info, aarch64_insn *code,
802 const aarch64_inst *inst ATTRIBUTE_UNUSED)
803 {
804 enum aarch64_modifier_kind kind;
805
806 /* Rm */
807 insert_field (FLD_Rm, code, info->reg.regno, 0);
808 /* option */
809 kind = info->shifter.kind;
810 if (kind == AARCH64_MOD_LSL)
811 kind = info->qualifier == AARCH64_OPND_QLF_W
812 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
813 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
814 /* imm3 */
815 insert_field (FLD_imm3, code, info->shifter.amount, 0);
816
817 return NULL;
818 }
819
820 /* Encode the shifted register operand for e.g.
821 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
822 const char *
823 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
824 const aarch64_opnd_info *info, aarch64_insn *code,
825 const aarch64_inst *inst ATTRIBUTE_UNUSED)
826 {
827 /* Rm */
828 insert_field (FLD_Rm, code, info->reg.regno, 0);
829 /* shift */
830 insert_field (FLD_shift, code,
831 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
832 /* imm6 */
833 insert_field (FLD_imm6, code, info->shifter.amount, 0);
834
835 return NULL;
836 }
837
838 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
839 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
840 SELF's operand-dependent value. fields[0] specifies the field that
841 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
842 const char *
843 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
844 const aarch64_opnd_info *info,
845 aarch64_insn *code,
846 const aarch64_inst *inst ATTRIBUTE_UNUSED)
847 {
848 int factor = 1 + get_operand_specific_data (self);
849 insert_field (self->fields[0], code, info->addr.base_regno, 0);
850 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
851 return NULL;
852 }
853
854 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
855 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
856 SELF's operand-dependent value. fields[0] specifies the field that
857 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
858 const char *
859 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
860 const aarch64_opnd_info *info,
861 aarch64_insn *code,
862 const aarch64_inst *inst ATTRIBUTE_UNUSED)
863 {
864 int factor = 1 + get_operand_specific_data (self);
865 insert_field (self->fields[0], code, info->addr.base_regno, 0);
866 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
867 return NULL;
868 }
869
870 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
871 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
872 SELF's operand-dependent value. fields[0] specifies the field that
873 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
874 and imm3 fields, with imm3 being the less-significant part. */
875 const char *
876 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
877 const aarch64_opnd_info *info,
878 aarch64_insn *code,
879 const aarch64_inst *inst ATTRIBUTE_UNUSED)
880 {
881 int factor = 1 + get_operand_specific_data (self);
882 insert_field (self->fields[0], code, info->addr.base_regno, 0);
883 insert_fields (code, info->addr.offset.imm / factor, 0,
884 2, FLD_imm3, FLD_SVE_imm6);
885 return NULL;
886 }
887
888 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
889 is a 4-bit signed number and where <shift> is SELF's operand-dependent
890 value. fields[0] specifies the base register field. */
891 const char *
892 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
893 const aarch64_opnd_info *info, aarch64_insn *code,
894 const aarch64_inst *inst ATTRIBUTE_UNUSED)
895 {
896 int factor = 1 << get_operand_specific_data (self);
897 insert_field (self->fields[0], code, info->addr.base_regno, 0);
898 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
899 return NULL;
900 }
901
902 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
903 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
904 value. fields[0] specifies the base register field. */
905 const char *
906 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
907 const aarch64_opnd_info *info, aarch64_insn *code,
908 const aarch64_inst *inst ATTRIBUTE_UNUSED)
909 {
910 int factor = 1 << get_operand_specific_data (self);
911 insert_field (self->fields[0], code, info->addr.base_regno, 0);
912 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
913 return NULL;
914 }
915
916 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
917 is SELF's operand-dependent value. fields[0] specifies the base
918 register field and fields[1] specifies the offset register field. */
919 const char *
920 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
921 const aarch64_opnd_info *info, aarch64_insn *code,
922 const aarch64_inst *inst ATTRIBUTE_UNUSED)
923 {
924 insert_field (self->fields[0], code, info->addr.base_regno, 0);
925 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
926 return NULL;
927 }
928
929 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
930 <shift> is SELF's operand-dependent value. fields[0] specifies the
931 base register field, fields[1] specifies the offset register field and
932 fields[2] is a single-bit field that selects SXTW over UXTW. */
933 const char *
934 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
935 const aarch64_opnd_info *info, aarch64_insn *code,
936 const aarch64_inst *inst ATTRIBUTE_UNUSED)
937 {
938 insert_field (self->fields[0], code, info->addr.base_regno, 0);
939 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
940 if (info->shifter.kind == AARCH64_MOD_UXTW)
941 insert_field (self->fields[2], code, 0, 0);
942 else
943 insert_field (self->fields[2], code, 1, 0);
944 return NULL;
945 }
946
947 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
948 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
949 fields[0] specifies the base register field. */
950 const char *
951 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
952 const aarch64_opnd_info *info, aarch64_insn *code,
953 const aarch64_inst *inst ATTRIBUTE_UNUSED)
954 {
955 int factor = 1 << get_operand_specific_data (self);
956 insert_field (self->fields[0], code, info->addr.base_regno, 0);
957 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
958 return NULL;
959 }
960
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is fixed by the instruction and where <msz> is a
   2-bit unsigned number.  fields[0] specifies the base register field
   and fields[1] specifies the offset register field.

   NOTE(review): despite the "ext" in the name, this is an inserter
   (it writes into *CODE); it is the shared worker for the
   aarch64_ins_sve_addr_zz_* wrappers below.  The name appears to
   mirror the extracter of the same shape in aarch64-dis.c — confirm
   before renaming, as the wrappers depend on it.  */
static const char *
aarch64_ext_sve_addr_zz (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  /* Only the shift amount <msz> is encoded; the modifier kind itself
     is implied by the opcode.  */
  insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
  return NULL;
}
974
975 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
976 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
977 field and fields[1] specifies the offset register field. */
978 const char *
979 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
980 const aarch64_opnd_info *info, aarch64_insn *code,
981 const aarch64_inst *inst ATTRIBUTE_UNUSED)
982 {
983 return aarch64_ext_sve_addr_zz (self, info, code);
984 }
985
986 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
987 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
988 field and fields[1] specifies the offset register field. */
989 const char *
990 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
991 const aarch64_opnd_info *info,
992 aarch64_insn *code,
993 const aarch64_inst *inst ATTRIBUTE_UNUSED)
994 {
995 return aarch64_ext_sve_addr_zz (self, info, code);
996 }
997
998 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
999 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1000 field and fields[1] specifies the offset register field. */
1001 const char *
1002 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
1003 const aarch64_opnd_info *info,
1004 aarch64_insn *code,
1005 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1006 {
1007 return aarch64_ext_sve_addr_zz (self, info, code);
1008 }
1009
/* Encode an SVE ADD/SUB immediate.  The encoded value is an 8-bit
   immediate plus a shift flag (bit 8) selecting an LSL #8.  */
const char *
aarch64_ins_sve_aimm (const aarch64_operand *self,
		      const aarch64_opnd_info *info, aarch64_insn *code,
		      const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  if (info->shifter.amount == 8)
    /* Explicit #<imm>, LSL #8: low 8 bits of the immediate with the
       shift flag (256) set.  */
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    /* An unshifted nonzero multiple of 256 is canonicalized to the
       shifted form so that it fits in 8 bits.  */
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    /* Plain 8-bit immediate; shift flag clear.  */
    insert_all_fields (self, code, info->imm.value & 0xff);
  return NULL;
}
1024
1025 /* Encode an SVE CPY/DUP immediate. */
1026 const char *
1027 aarch64_ins_sve_asimm (const aarch64_operand *self,
1028 const aarch64_opnd_info *info, aarch64_insn *code,
1029 const aarch64_inst *inst)
1030 {
1031 return aarch64_ins_sve_aimm (self, info, code, inst);
1032 }
1033
1034 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1035 array specifies which field to use for Zn. MM is encoded in the
1036 concatenation of imm5 and SVE_tszh, with imm5 being the less
1037 significant part. */
1038 const char *
1039 aarch64_ins_sve_index (const aarch64_operand *self,
1040 const aarch64_opnd_info *info, aarch64_insn *code,
1041 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1042 {
1043 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1044 insert_field (self->fields[0], code, info->reglane.regno, 0);
1045 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1046 2, FLD_imm5, FLD_SVE_tszh);
1047 return NULL;
1048 }
1049
1050 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1051 const char *
1052 aarch64_ins_sve_limm_mov (const aarch64_operand *self,
1053 const aarch64_opnd_info *info, aarch64_insn *code,
1054 const aarch64_inst *inst)
1055 {
1056 return aarch64_ins_limm (self, info, code, inst);
1057 }
1058
1059 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1060 and where MM occupies the most-significant part. The operand-dependent
1061 value specifies the number of bits in Zn. */
1062 const char *
1063 aarch64_ins_sve_quad_index (const aarch64_operand *self,
1064 const aarch64_opnd_info *info, aarch64_insn *code,
1065 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1066 {
1067 unsigned int reg_bits = get_operand_specific_data (self);
1068 assert (info->reglane.regno < (1U << reg_bits));
1069 unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1070 insert_all_fields (self, code, val);
1071 return NULL;
1072 }
1073
1074 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1075 to use for Zn. */
1076 const char *
1077 aarch64_ins_sve_reglist (const aarch64_operand *self,
1078 const aarch64_opnd_info *info, aarch64_insn *code,
1079 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1080 {
1081 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
1082 return NULL;
1083 }
1084
1085 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1086 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1087 field. */
1088 const char *
1089 aarch64_ins_sve_scale (const aarch64_operand *self,
1090 const aarch64_opnd_info *info, aarch64_insn *code,
1091 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1092 {
1093 insert_all_fields (self, code, info->imm.value);
1094 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
1095 return NULL;
1096 }
1097
1098 /* Encode an SVE shift left immediate. */
1099 const char *
1100 aarch64_ins_sve_shlimm (const aarch64_operand *self,
1101 const aarch64_opnd_info *info, aarch64_insn *code,
1102 const aarch64_inst *inst)
1103 {
1104 const aarch64_opnd_info *prev_operand;
1105 unsigned int esize;
1106
1107 assert (info->idx > 0);
1108 prev_operand = &inst->operands[info->idx - 1];
1109 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1110 insert_all_fields (self, code, 8 * esize + info->imm.value);
1111 return NULL;
1112 }
1113
1114 /* Encode an SVE shift right immediate. */
1115 const char *
1116 aarch64_ins_sve_shrimm (const aarch64_operand *self,
1117 const aarch64_opnd_info *info, aarch64_insn *code,
1118 const aarch64_inst *inst)
1119 {
1120 const aarch64_opnd_info *prev_operand;
1121 unsigned int esize;
1122
1123 assert (info->idx > 0);
1124 prev_operand = &inst->operands[info->idx - 1];
1125 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1126 insert_all_fields (self, code, 16 * esize - info->imm.value);
1127 return NULL;
1128 }
1129
1130 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1131 The fields array specifies which field to use. */
1132 const char *
1133 aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1134 const aarch64_opnd_info *info,
1135 aarch64_insn *code,
1136 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1137 {
1138 if (info->imm.value == 0x3f000000)
1139 insert_field (self->fields[0], code, 0, 0);
1140 else
1141 insert_field (self->fields[0], code, 1, 0);
1142 return NULL;
1143 }
1144
1145 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1146 The fields array specifies which field to use. */
1147 const char *
1148 aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1149 const aarch64_opnd_info *info,
1150 aarch64_insn *code,
1151 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1152 {
1153 if (info->imm.value == 0x3f000000)
1154 insert_field (self->fields[0], code, 0, 0);
1155 else
1156 insert_field (self->fields[0], code, 1, 0);
1157 return NULL;
1158 }
1159
1160 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1161 The fields array specifies which field to use. */
1162 const char *
1163 aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1164 const aarch64_opnd_info *info,
1165 aarch64_insn *code,
1166 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1167 {
1168 if (info->imm.value == 0)
1169 insert_field (self->fields[0], code, 0, 0);
1170 else
1171 insert_field (self->fields[0], code, 1, 0);
1172 return NULL;
1173 }
1174
1175 /* Miscellaneous encoding functions. */
1176
1177 /* Encode size[0], i.e. bit 22, for
1178 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1179
1180 static void
1181 encode_asimd_fcvt (aarch64_inst *inst)
1182 {
1183 aarch64_insn value;
1184 aarch64_field field = {0, 0};
1185 enum aarch64_opnd_qualifier qualifier;
1186
1187 switch (inst->opcode->op)
1188 {
1189 case OP_FCVTN:
1190 case OP_FCVTN2:
1191 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1192 qualifier = inst->operands[1].qualifier;
1193 break;
1194 case OP_FCVTL:
1195 case OP_FCVTL2:
1196 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1197 qualifier = inst->operands[0].qualifier;
1198 break;
1199 default:
1200 assert (0);
1201 }
1202 assert (qualifier == AARCH64_OPND_QLF_V_4S
1203 || qualifier == AARCH64_OPND_QLF_V_2D);
1204 value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
1205 gen_sub_field (FLD_size, 0, 1, &field);
1206 insert_field_2 (&field, &inst->value, value, 0);
1207 }
1208
1209 /* Encode size[0], i.e. bit 22, for
1210 e.g. FCVTXN <Vb><d>, <Va><n>. */
1211
1212 static void
1213 encode_asisd_fcvtxn (aarch64_inst *inst)
1214 {
1215 aarch64_insn val = 1;
1216 aarch64_field field = {0, 0};
1217 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1218 gen_sub_field (FLD_size, 0, 1, &field);
1219 insert_field_2 (&field, &inst->value, val, 0);
1220 }
1221
1222 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1223 static void
1224 encode_fcvt (aarch64_inst *inst)
1225 {
1226 aarch64_insn val;
1227 const aarch64_field field = {15, 2};
1228
1229 /* opc dstsize */
1230 switch (inst->operands[0].qualifier)
1231 {
1232 case AARCH64_OPND_QLF_S_S: val = 0; break;
1233 case AARCH64_OPND_QLF_S_D: val = 1; break;
1234 case AARCH64_OPND_QLF_S_H: val = 3; break;
1235 default: abort ();
1236 }
1237 insert_field_2 (&field, &inst->value, val, 0);
1238
1239 return;
1240 }
1241
1242 /* Return the index in qualifiers_list that INST is using. Should only
1243 be called once the qualifiers are known to be valid. */
1244
1245 static int
1246 aarch64_get_variant (struct aarch64_inst *inst)
1247 {
1248 int i, nops, variant;
1249
1250 nops = aarch64_num_of_operands (inst->opcode);
1251 for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
1252 {
1253 for (i = 0; i < nops; ++i)
1254 if (inst->opcode->qualifiers_list[variant][i]
1255 != inst->operands[i].qualifier)
1256 break;
1257 if (i == nops)
1258 return variant;
1259 }
1260 abort ();
1261 }
1262
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  Handles the FCVT family's ad-hoc size bits and the SVE
   MOV/MOVS/MOVZ/MOVM/NOT aliases, which hide one register operand that
   must be duplicated into a second field of the real instruction.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* MOV Pd, Pn is SEL/ORRS with Pn repeated: copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* MOV Zd, Pg/M, Zn is SEL with Zd as the "else" value:
	 copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* MOV Zd, <V><n> is DUP with index 0: fill in the zero immediate.
	 The variant index selects which tsz bit is set.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
		     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* MOV Zd, Zn is ORR with Zn repeated: copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      /* MOV Zd, Zn[i] maps directly onto DUP; nothing extra to do.  */
      break;
    case OP_MOVM_P_P_P:
      /* MOVM Pd, Pg/M, Pn is SEL with Pd repeated: copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* MOVZ(S) Pd, Pg/Z, Pn is AND(S) with Pn repeated:
	 copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* NOT(S) Pd, Pg/Z, Pn is EOR(S) with Pg repeated:
	 copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}
1329
/* Encode the 'size' and 'Q' field for e.g. SHADD.
   The standard qualifier value packs both: bit 0 is Q, bits 2:1 are
   the element size.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q: the low bit of the standard qualifier value.  */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size: the AdvSIMD load/store classes keep their size bits in a
     different position (FLD_vldst_size) from everything else.  */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
1357
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has the enough
   information for such an encoding.

   Note the F_* checks below are not mutually exclusive: an opcode may
   carry several of these flags, and each applicable encoding step is
   performed in turn, with F_MISC handled last.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  if (inst->opcode->flags & F_SF)
    {
      /* sf = 1 for a 64-bit (X or SP) general register operand.  */
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      /* Some opcodes mirror sf into the N field as well.  */
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_LSE_SZ)
    {
      /* LSE atomics use a dedicated size bit with the same W/X rule.  */
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  if (inst->opcode->flags & F_FPTYPE)
    {
      /* Scalar FP 'type' field: S = 0, D = 1, H = 3.  */
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: assert (0);
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SSIZE)
    {
      /* AdvSIMD scalar 'size' field from a scalar qualifier B..Q.  */
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_T)
    {
      /* Vector arrangement encoded in Q and a one-hot bit in imm5.  */
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      /* The upper bits of the standard value give the position of the
	 one-hot bit within imm5.  */
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be a integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].
	 opc<0> is the inverse of the W/X standard value.  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
1483
/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  switch (inst->opcode->iclass)
    {
    case sve_cpy:
      /* The variant selects both the M bit and the element size.  */
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
      /* For indices and shift amounts, the variant is encoded as
	 part of the immediate.  */
      break;

    case sve_limm:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
	 and depend on the immediate.  They don't have a separate
	 encoding.  */
      break;

    case sve_misc:
      /* sve_misc instructions have only a single variant.  */
      break;

    case sve_movprfx:
      /* Like sve_cpy but with the M bit at position 16.  */
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      /* Only the M (merging/zeroing) bit varies.  */
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      /* The variant index is exactly the size field value (B = 0).  */
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      /* The list starts at H, so the size field is the variant + 1.  */
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_sd:
      /* Single-bit S/D selector.  */
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    default:
      break;
    }
}
1542
/* Converters converting an alias opcode instruction to its real form.  */

/* ROR <Wd>, <Ws>, #<shift>
   is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  /* Shift the operands up one slot from the end so that <Ws> can be
     duplicated; the copy order (3 <- 2 before 2 <- 1) matters.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}
1554
1555 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1556 is equivalent to:
1557 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1558 static void
1559 convert_xtl_to_shll (aarch64_inst *inst)
1560 {
1561 inst->operands[2].qualifier = inst->operands[1].qualifier;
1562 inst->operands[2].imm.value = 0;
1563 }
1564
1565 /* Convert
1566 LSR <Xd>, <Xn>, #<shift>
1567 to
1568 UBFM <Xd>, <Xn>, #<shift>, #63. */
1569 static void
1570 convert_sr_to_bfm (aarch64_inst *inst)
1571 {
1572 inst->operands[3].imm.value =
1573 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1574 }
1575
/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
     is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.
     Duplicate the single source into the second source slot.  */
  copy_operand_info (inst, 2, 1);
}
1585
1586 /* When <imms> >= <immr>, the instruction written:
1587 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1588 is equivalent to:
1589 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1590
1591 static void
1592 convert_bfx_to_bfm (aarch64_inst *inst)
1593 {
1594 int64_t lsb, width;
1595
1596 /* Convert the operand. */
1597 lsb = inst->operands[2].imm.value;
1598 width = inst->operands[3].imm.value;
1599 inst->operands[2].imm.value = lsb;
1600 inst->operands[3].imm.value = lsb + width - 1;
1601 }
1602
1603 /* When <imms> < <immr>, the instruction written:
1604 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1605 is equivalent to:
1606 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1607
1608 static void
1609 convert_bfi_to_bfm (aarch64_inst *inst)
1610 {
1611 int64_t lsb, width;
1612
1613 /* Convert the operand. */
1614 lsb = inst->operands[2].imm.value;
1615 width = inst->operands[3].imm.value;
1616 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1617 {
1618 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1619 inst->operands[3].imm.value = width - 1;
1620 }
1621 else
1622 {
1623 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1624 inst->operands[3].imm.value = width - 1;
1625 }
1626 }
1627
1628 /* The instruction written:
1629 BFC <Xd>, #<lsb>, #<width>
1630 is equivalent to:
1631 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1632
1633 static void
1634 convert_bfc_to_bfm (aarch64_inst *inst)
1635 {
1636 int64_t lsb, width;
1637
1638 /* Insert XZR. */
1639 copy_operand_info (inst, 3, 2);
1640 copy_operand_info (inst, 2, 1);
1641 copy_operand_info (inst, 1, 0);
1642 inst->operands[1].reg.regno = 0x1f;
1643
1644 /* Convert the immediate operand. */
1645 lsb = inst->operands[2].imm.value;
1646 width = inst->operands[3].imm.value;
1647 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1648 {
1649 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1650 inst->operands[3].imm.value = width - 1;
1651 }
1652 else
1653 {
1654 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1655 inst->operands[3].imm.value = width - 1;
1656 }
1657 }
1658
1659 /* The instruction written:
1660 LSL <Xd>, <Xn>, #<shift>
1661 is equivalent to:
1662 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1663
1664 static void
1665 convert_lsl_to_ubfm (aarch64_inst *inst)
1666 {
1667 int64_t shift = inst->operands[2].imm.value;
1668
1669 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1670 {
1671 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1672 inst->operands[3].imm.value = 31 - shift;
1673 }
1674 else
1675 {
1676 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1677 inst->operands[3].imm.value = 63 - shift;
1678 }
1679 }
1680
/* CINC <Wd>, <Wn>, <cond>
   is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).
   (Also used for CINV/CSINV and CNEG/CSNEG.)  */

static void
convert_to_csel (aarch64_inst *inst)
{
  /* Shift the condition up one slot and duplicate <Wn>; the copy
     order (3 <- 2 before 2 <- 1) matters.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  /* The alias reads the condition in its non-inverted sense.  */
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1692
/* CSET <Wd>, <cond>
   is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).
   (Also used for CSETM/CSINV.)  */

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  /* Move the condition to slot 3 and fill slots 1 and 2 with copies
     of the destination, then overwrite their register numbers with
     WZR/XZR (31).  The copy order matters.  */
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  /* The alias reads the condition in its non-inverted sense.  */
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1707
/* MOV <Wd>, #<imm>
   is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>  (or MOVN for the inverted form).  */

static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value;

  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      /* MOVZ: encode the value as written.  */
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      /* MOVN: the instruction encodes the bitwise inverse.  */
      value = ~inst->operands[1].imm.value;
      break;
    default:
      assert (0);
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);
  /* Reduce the value to the 16-bit chunk selected by SHIFT_AMOUNT.  */
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
}
1741
1742 /* MOV <Wd>, #<imm>
1743 is equivalent to:
1744 ORR <Wd>, WZR, #<imm>. */
1745
1746 static void
1747 convert_mov_to_movebitmask (aarch64_inst *inst)
1748 {
1749 copy_operand_info (inst, 2, 1);
1750 inst->operands[1].reg.regno = 0x1f;
1751 inst->operands[1].skip = 0;
1752 }
1753
1754 /* Some alias opcodes are assembled by being converted to their real-form. */
1755
1756 static void
1757 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1758 {
1759 const aarch64_opcode *alias = inst->opcode;
1760
1761 if ((alias->flags & F_CONV) == 0)
1762 goto convert_to_real_return;
1763
1764 switch (alias->op)
1765 {
1766 case OP_ASR_IMM:
1767 case OP_LSR_IMM:
1768 convert_sr_to_bfm (inst);
1769 break;
1770 case OP_LSL_IMM:
1771 convert_lsl_to_ubfm (inst);
1772 break;
1773 case OP_CINC:
1774 case OP_CINV:
1775 case OP_CNEG:
1776 convert_to_csel (inst);
1777 break;
1778 case OP_CSET:
1779 case OP_CSETM:
1780 convert_cset_to_csinc (inst);
1781 break;
1782 case OP_UBFX:
1783 case OP_BFXIL:
1784 case OP_SBFX:
1785 convert_bfx_to_bfm (inst);
1786 break;
1787 case OP_SBFIZ:
1788 case OP_BFI:
1789 case OP_UBFIZ:
1790 convert_bfi_to_bfm (inst);
1791 break;
1792 case OP_BFC:
1793 convert_bfc_to_bfm (inst);
1794 break;
1795 case OP_MOV_V:
1796 convert_mov_to_orr (inst);
1797 break;
1798 case OP_MOV_IMM_WIDE:
1799 case OP_MOV_IMM_WIDEN:
1800 convert_mov_to_movewide (inst);
1801 break;
1802 case OP_MOV_IMM_LOG:
1803 convert_mov_to_movebitmask (inst);
1804 break;
1805 case OP_ROR_IMM:
1806 convert_ror_to_extr (inst);
1807 break;
1808 case OP_SXTL:
1809 case OP_SXTL2:
1810 case OP_UXTL:
1811 case OP_UXTL2:
1812 convert_xtl_to_shll (inst);
1813 break;
1814 default:
1815 break;
1816 }
1817
1818 convert_to_real_return:
1819 aarch64_replace_opcode (inst, real);
1820 }
1821
1822 /* Encode *INST_ORI of the opcode code OPCODE.
1823 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1824 matched operand qualifier sequence in *QLF_SEQ. */
1825
1826 int
1827 aarch64_opcode_encode (const aarch64_opcode *opcode,
1828 const aarch64_inst *inst_ori, aarch64_insn *code,
1829 aarch64_opnd_qualifier_t *qlf_seq,
1830 aarch64_operand_error *mismatch_detail)
1831 {
1832 int i;
1833 const aarch64_opcode *aliased;
1834 aarch64_inst copy, *inst;
1835
1836 DEBUG_TRACE ("enter with %s", opcode->name);
1837
1838 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1839 copy = *inst_ori;
1840 inst = &copy;
1841
1842 assert (inst->opcode == NULL || inst->opcode == opcode);
1843 if (inst->opcode == NULL)
1844 inst->opcode = opcode;
1845
1846 /* Constrain the operands.
1847 After passing this, the encoding is guaranteed to succeed. */
1848 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
1849 {
1850 DEBUG_TRACE ("FAIL since operand constraint not met");
1851 return 0;
1852 }
1853
1854 /* Get the base value.
1855 Note: this has to be before the aliasing handling below in order to
1856 get the base value from the alias opcode before we move on to the
1857 aliased opcode for encoding. */
1858 inst->value = opcode->opcode;
1859
1860 /* No need to do anything else if the opcode does not have any operand. */
1861 if (aarch64_num_of_operands (opcode) == 0)
1862 goto encoding_exit;
1863
1864 /* Assign operand indexes and check types. Also put the matched
1865 operand qualifiers in *QLF_SEQ to return. */
1866 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1867 {
1868 assert (opcode->operands[i] == inst->operands[i].type);
1869 inst->operands[i].idx = i;
1870 if (qlf_seq != NULL)
1871 *qlf_seq = inst->operands[i].qualifier;
1872 }
1873
1874 aliased = aarch64_find_real_opcode (opcode);
1875 /* If the opcode is an alias and it does not ask for direct encoding by
1876 itself, the instruction will be transformed to the form of real opcode
1877 and the encoding will be carried out using the rules for the aliased
1878 opcode. */
1879 if (aliased != NULL && (opcode->flags & F_CONV))
1880 {
1881 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1882 aliased->name, opcode->name);
1883 /* Convert the operands to the form of the real opcode. */
1884 convert_to_real (inst, aliased);
1885 opcode = aliased;
1886 }
1887
1888 aarch64_opnd_info *info = inst->operands;
1889
1890 /* Call the inserter of each operand. */
1891 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
1892 {
1893 const aarch64_operand *opnd;
1894 enum aarch64_opnd type = opcode->operands[i];
1895 if (type == AARCH64_OPND_NIL)
1896 break;
1897 if (info->skip)
1898 {
1899 DEBUG_TRACE ("skip the incomplete operand %d", i);
1900 continue;
1901 }
1902 opnd = &aarch64_operands[type];
1903 if (operand_has_inserter (opnd))
1904 aarch64_insert_operand (opnd, info, &inst->value, inst);
1905 }
1906
1907 /* Call opcode encoders indicated by flags. */
1908 if (opcode_has_special_coder (opcode))
1909 do_special_encoding (inst);
1910
1911 /* Possibly use the instruction class to encode the chosen qualifier
1912 variant. */
1913 aarch64_encode_variant_using_iclass (inst);
1914
1915 encoding_exit:
1916 DEBUG_TRACE ("exit with %s", opcode->name);
1917
1918 *code = inst->value;
1919
1920 return 1;
1921 }