]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - opcodes/aarch64-asm.c
[AArch64][SVE 25/32] Add support for SVE addressing modes
[thirdparty/binutils-gdb.git] / opcodes / aarch64-asm.c
1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
25
26 /* Utilities. */
27
/* The unnamed arguments consist of the number of fields and information about
   these fields where the VALUE will be inserted into CODE.  MASK can be zero or
   the base mask of the opcode.

   N.B. the fields are required to be in such an order that the least
   significant field for VALUE comes first, e.g. when the <index> in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
   is encoded in H:L:M, the fields should be passed in
   the order of M, L, H.  */
37
38 static inline void
39 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
40 {
41 uint32_t num;
42 const aarch64_field *field;
43 enum aarch64_field_kind kind;
44 va_list va;
45
46 va_start (va, mask);
47 num = va_arg (va, uint32_t);
48 assert (num <= 5);
49 while (num--)
50 {
51 kind = va_arg (va, enum aarch64_field_kind);
52 field = &fields[kind];
53 insert_field (kind, code, value, mask);
54 value >>= field->width;
55 }
56 va_end (va);
57 }
58
59 /* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
61
62 static void
63 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
64 aarch64_insn value)
65 {
66 unsigned int i;
67 enum aarch64_field_kind kind;
68
69 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
70 if (self->fields[i] != FLD_NIL)
71 {
72 kind = self->fields[i];
73 insert_field (kind, code, value, 0);
74 value >>= fields[kind].width;
75 }
76 }
77
78 /* Operand inserters. */
79
80 /* Insert register number. */
81 const char *
82 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
83 aarch64_insn *code,
84 const aarch64_inst *inst ATTRIBUTE_UNUSED)
85 {
86 insert_field (self->fields[0], code, info->reg.regno, 0);
87 return NULL;
88 }
89
/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
const char *
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      /* POS is 0 for byte, 1 for halfword, 2 for word and 3 for doubleword
	 elements; it determines where the marker bit sits in imm5/imm4.  */
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D  */
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else
    {
      /* Vector-by-element index, e.g. the <index> in
	 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]; which bits carry the
	 index depends on the element size.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* H:L:M */
	  insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* H:L */
	  insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  insert_field (FLD_H, code, info->reglane.index, 0);
	  break;
	default:
	  assert (0);
	}
    }
  return NULL;
}
148
149 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
150 const char *
151 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
152 aarch64_insn *code,
153 const aarch64_inst *inst ATTRIBUTE_UNUSED)
154 {
155 /* R */
156 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
157 /* len */
158 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
159 return NULL;
160 }
161
162 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
163 in AdvSIMD load/store instructions. */
164 const char *
165 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
166 const aarch64_opnd_info *info, aarch64_insn *code,
167 const aarch64_inst *inst)
168 {
169 aarch64_insn value = 0;
170 /* Number of elements in each structure to be loaded/stored. */
171 unsigned num = get_opcode_dependent_value (inst->opcode);
172
173 /* Rt */
174 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
175 /* opcode */
176 switch (num)
177 {
178 case 1:
179 switch (info->reglist.num_regs)
180 {
181 case 1: value = 0x7; break;
182 case 2: value = 0xa; break;
183 case 3: value = 0x6; break;
184 case 4: value = 0x2; break;
185 default: assert (0);
186 }
187 break;
188 case 2:
189 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
190 break;
191 case 3:
192 value = 0x4;
193 break;
194 case 4:
195 value = 0x0;
196 break;
197 default:
198 assert (0);
199 }
200 insert_field (FLD_opcode, code, value, 0);
201
202 return NULL;
203 }
204
205 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
206 single structure to all lanes instructions. */
207 const char *
208 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
209 const aarch64_opnd_info *info, aarch64_insn *code,
210 const aarch64_inst *inst)
211 {
212 aarch64_insn value;
213 /* The opcode dependent area stores the number of elements in
214 each structure to be loaded/stored. */
215 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
216
217 /* Rt */
218 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
219 /* S */
220 value = (aarch64_insn) 0;
221 if (is_ld1r && info->reglist.num_regs == 2)
222 /* OP_LD1R does not have alternating variant, but have "two consecutive"
223 instead. */
224 value = (aarch64_insn) 1;
225 insert_field (FLD_S, code, value, 0);
226
227 return NULL;
228 }
229
/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand e.g. Vt in AdvSIMD load/store single element instructions.  */
const char *
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  The wider the element, the
     fewer bits of Q:S:size the index occupies; the element size itself is
     distinguished via opcode<2:1> (and size<0> for doubleword).  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q"; size<0> must be set for doubleword.  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      assert (0);
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return NULL;
}
277
/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
   SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.
   Right shifts are stored biased against the element size, left shifts
   biased by it; the immh bits that survive the bias identify the size.  */
const char *
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst)
{
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      /* Drop the Q bit; the remainder selects the immh pattern.  */
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return NULL;
}
332
333 /* Insert fields for e.g. the immediate operands in
334 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
335 const char *
336 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
337 aarch64_insn *code,
338 const aarch64_inst *inst ATTRIBUTE_UNUSED)
339 {
340 int64_t imm;
341
342 imm = info->imm.value;
343 if (operand_need_shift_by_two (self))
344 imm >>= 2;
345 insert_all_fields (self, code, imm);
346 return NULL;
347 }
348
349 /* Insert immediate and its shift amount for e.g. the last operand in
350 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
351 const char *
352 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
353 aarch64_insn *code, const aarch64_inst *inst)
354 {
355 /* imm16 */
356 aarch64_ins_imm (self, info, code, inst);
357 /* hw */
358 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
359 return NULL;
360 }
361
362 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
363 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
364 const char *
365 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
366 const aarch64_opnd_info *info,
367 aarch64_insn *code,
368 const aarch64_inst *inst ATTRIBUTE_UNUSED)
369 {
370 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
371 uint64_t imm = info->imm.value;
372 enum aarch64_modifier_kind kind = info->shifter.kind;
373 int amount = info->shifter.amount;
374 aarch64_field field = {0, 0};
375
376 /* a:b:c:d:e:f:g:h */
377 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
378 {
379 /* Either MOVI <Dd>, #<imm>
380 or MOVI <Vd>.2D, #<imm>.
381 <imm> is a 64-bit immediate
382 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
383 encoded in "a:b:c:d:e:f:g:h". */
384 imm = aarch64_shrink_expanded_imm8 (imm);
385 assert ((int)imm >= 0);
386 }
387 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
388
389 if (kind == AARCH64_MOD_NONE)
390 return NULL;
391
392 /* shift amount partially in cmode */
393 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
394 if (kind == AARCH64_MOD_LSL)
395 {
396 /* AARCH64_MOD_LSL: shift zeros. */
397 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
398 assert (esize == 4 || esize == 2 || esize == 1);
399 /* For 8-bit move immediate, the optional LSL #0 does not require
400 encoding. */
401 if (esize == 1)
402 return NULL;
403 amount >>= 3;
404 if (esize == 4)
405 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
406 else
407 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
408 }
409 else
410 {
411 /* AARCH64_MOD_MSL: shift ones. */
412 amount >>= 4;
413 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
414 }
415 insert_field_2 (&field, code, amount, 0);
416
417 return NULL;
418 }
419
420 /* Insert fields for an 8-bit floating-point immediate. */
421 const char *
422 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
423 aarch64_insn *code,
424 const aarch64_inst *inst ATTRIBUTE_UNUSED)
425 {
426 insert_all_fields (self, code, info->imm.value);
427 return NULL;
428 }
429
430 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
431 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
432 const char *
433 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
434 aarch64_insn *code,
435 const aarch64_inst *inst ATTRIBUTE_UNUSED)
436 {
437 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
438 return NULL;
439 }
440
441 /* Insert arithmetic immediate for e.g. the last operand in
442 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
443 const char *
444 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
445 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
446 {
447 /* shift */
448 aarch64_insn value = info->shifter.amount ? 1 : 0;
449 insert_field (self->fields[0], code, value, 0);
450 /* imm12 (unsigned) */
451 insert_field (self->fields[1], code, info->imm.value, 0);
452 return NULL;
453 }
454
455 /* Insert logical/bitmask immediate for e.g. the last operand in
456 ORR <Wd|WSP>, <Wn>, #<imm>. */
457 const char *
458 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
459 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
460 {
461 aarch64_insn value;
462 uint64_t imm = info->imm.value;
463 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
464
465 if (inst->opcode->op == OP_BIC)
466 imm = ~imm;
467 if (aarch64_logical_immediate_p (imm, esize, &value) == FALSE)
468 /* The constraint check should have guaranteed this wouldn't happen. */
469 assert (0);
470
471 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
472 self->fields[0]);
473 return NULL;
474 }
475
476 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
477 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
478 const char *
479 aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
480 aarch64_insn *code, const aarch64_inst *inst)
481 {
482 aarch64_insn value = 0;
483
484 assert (info->idx == 0);
485
486 /* Rt */
487 aarch64_ins_regno (self, info, code, inst);
488 if (inst->opcode->iclass == ldstpair_indexed
489 || inst->opcode->iclass == ldstnapair_offs
490 || inst->opcode->iclass == ldstpair_off
491 || inst->opcode->iclass == loadlit)
492 {
493 /* size */
494 switch (info->qualifier)
495 {
496 case AARCH64_OPND_QLF_S_S: value = 0; break;
497 case AARCH64_OPND_QLF_S_D: value = 1; break;
498 case AARCH64_OPND_QLF_S_Q: value = 2; break;
499 default: assert (0);
500 }
501 insert_field (FLD_ldst_size, code, value, 0);
502 }
503 else
504 {
505 /* opc[1]:size */
506 value = aarch64_get_qualifier_standard_value (info->qualifier);
507 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
508 }
509
510 return NULL;
511 }
512
513 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
514 const char *
515 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
516 const aarch64_opnd_info *info, aarch64_insn *code,
517 const aarch64_inst *inst ATTRIBUTE_UNUSED)
518 {
519 /* Rn */
520 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
521 return NULL;
522 }
523
524 /* Encode the address operand for e.g.
525 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
526 const char *
527 aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
528 const aarch64_opnd_info *info, aarch64_insn *code,
529 const aarch64_inst *inst ATTRIBUTE_UNUSED)
530 {
531 aarch64_insn S;
532 enum aarch64_modifier_kind kind = info->shifter.kind;
533
534 /* Rn */
535 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
536 /* Rm */
537 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
538 /* option */
539 if (kind == AARCH64_MOD_LSL)
540 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven. */
541 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
542 /* S */
543 if (info->qualifier != AARCH64_OPND_QLF_S_B)
544 S = info->shifter.amount != 0;
545 else
546 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
547 S <amount>
548 0 [absent]
549 1 #0
550 Must be #0 if <extend> is explicitly LSL. */
551 S = info->shifter.operator_present && info->shifter.amount_present;
552 insert_field (FLD_S, code, S, 0);
553
554 return NULL;
555 }
556
557 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
558 const char *
559 aarch64_ins_addr_simm (const aarch64_operand *self,
560 const aarch64_opnd_info *info,
561 aarch64_insn *code,
562 const aarch64_inst *inst ATTRIBUTE_UNUSED)
563 {
564 int imm;
565
566 /* Rn */
567 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
568 /* simm (imm9 or imm7) */
569 imm = info->addr.offset.imm;
570 if (self->fields[0] == FLD_imm7)
571 /* scaled immediate in ld/st pair instructions.. */
572 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
573 insert_field (self->fields[0], code, imm, 0);
574 /* pre/post- index */
575 if (info->addr.writeback)
576 {
577 assert (inst->opcode->iclass != ldst_unscaled
578 && inst->opcode->iclass != ldstnapair_offs
579 && inst->opcode->iclass != ldstpair_off
580 && inst->opcode->iclass != ldst_unpriv);
581 assert (info->addr.preind != info->addr.postind);
582 if (info->addr.preind)
583 insert_field (self->fields[1], code, 1, 0);
584 }
585
586 return NULL;
587 }
588
589 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
590 const char *
591 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
592 const aarch64_opnd_info *info,
593 aarch64_insn *code,
594 const aarch64_inst *inst ATTRIBUTE_UNUSED)
595 {
596 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
597
598 /* Rn */
599 insert_field (self->fields[0], code, info->addr.base_regno, 0);
600 /* uimm12 */
601 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
602 return NULL;
603 }
604
605 /* Encode the address operand for e.g.
606 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
607 const char *
608 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
609 const aarch64_opnd_info *info, aarch64_insn *code,
610 const aarch64_inst *inst ATTRIBUTE_UNUSED)
611 {
612 /* Rn */
613 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
614 /* Rm | #<amount> */
615 if (info->addr.offset.is_reg)
616 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
617 else
618 insert_field (FLD_Rm, code, 0x1f, 0);
619 return NULL;
620 }
621
622 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
623 const char *
624 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
625 const aarch64_opnd_info *info, aarch64_insn *code,
626 const aarch64_inst *inst ATTRIBUTE_UNUSED)
627 {
628 /* cond */
629 insert_field (FLD_cond, code, info->cond->value, 0);
630 return NULL;
631 }
632
633 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
634 const char *
635 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
636 const aarch64_opnd_info *info, aarch64_insn *code,
637 const aarch64_inst *inst ATTRIBUTE_UNUSED)
638 {
639 /* op0:op1:CRn:CRm:op2 */
640 insert_fields (code, info->sysreg, inst->opcode->mask, 5,
641 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
642 return NULL;
643 }
644
645 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
646 const char *
647 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
648 const aarch64_opnd_info *info, aarch64_insn *code,
649 const aarch64_inst *inst ATTRIBUTE_UNUSED)
650 {
651 /* op1:op2 */
652 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
653 FLD_op2, FLD_op1);
654 return NULL;
655 }
656
657 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
658 const char *
659 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
660 const aarch64_opnd_info *info, aarch64_insn *code,
661 const aarch64_inst *inst ATTRIBUTE_UNUSED)
662 {
663 /* op1:CRn:CRm:op2 */
664 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
665 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
666 return NULL;
667 }
668
669 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
670
671 const char *
672 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
673 const aarch64_opnd_info *info, aarch64_insn *code,
674 const aarch64_inst *inst ATTRIBUTE_UNUSED)
675 {
676 /* CRm */
677 insert_field (FLD_CRm, code, info->barrier->value, 0);
678 return NULL;
679 }
680
681 /* Encode the prefetch operation option operand for e.g.
682 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
683
684 const char *
685 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
686 const aarch64_opnd_info *info, aarch64_insn *code,
687 const aarch64_inst *inst ATTRIBUTE_UNUSED)
688 {
689 /* prfop in Rt */
690 insert_field (FLD_Rt, code, info->prfop->value, 0);
691 return NULL;
692 }
693
694 /* Encode the hint number for instructions that alias HINT but take an
695 operand. */
696
697 const char *
698 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
699 const aarch64_opnd_info *info, aarch64_insn *code,
700 const aarch64_inst *inst ATTRIBUTE_UNUSED)
701 {
702 /* CRm:op2. */
703 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
704 return NULL;
705 }
706
707 /* Encode the extended register operand for e.g.
708 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
709 const char *
710 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
711 const aarch64_opnd_info *info, aarch64_insn *code,
712 const aarch64_inst *inst ATTRIBUTE_UNUSED)
713 {
714 enum aarch64_modifier_kind kind;
715
716 /* Rm */
717 insert_field (FLD_Rm, code, info->reg.regno, 0);
718 /* option */
719 kind = info->shifter.kind;
720 if (kind == AARCH64_MOD_LSL)
721 kind = info->qualifier == AARCH64_OPND_QLF_W
722 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
723 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
724 /* imm3 */
725 insert_field (FLD_imm3, code, info->shifter.amount, 0);
726
727 return NULL;
728 }
729
730 /* Encode the shifted register operand for e.g.
731 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
732 const char *
733 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
734 const aarch64_opnd_info *info, aarch64_insn *code,
735 const aarch64_inst *inst ATTRIBUTE_UNUSED)
736 {
737 /* Rm */
738 insert_field (FLD_Rm, code, info->reg.regno, 0);
739 /* shift */
740 insert_field (FLD_shift, code,
741 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
742 /* imm6 */
743 insert_field (FLD_imm6, code, info->shifter.amount, 0);
744
745 return NULL;
746 }
747
748 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
749 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
750 value. fields[0] specifies the base register field. */
751 const char *
752 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
753 const aarch64_opnd_info *info, aarch64_insn *code,
754 const aarch64_inst *inst ATTRIBUTE_UNUSED)
755 {
756 int factor = 1 << get_operand_specific_data (self);
757 insert_field (self->fields[0], code, info->addr.base_regno, 0);
758 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
759 return NULL;
760 }
761
762 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
763 is SELF's operand-dependent value. fields[0] specifies the base
764 register field and fields[1] specifies the offset register field. */
765 const char *
766 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
767 const aarch64_opnd_info *info, aarch64_insn *code,
768 const aarch64_inst *inst ATTRIBUTE_UNUSED)
769 {
770 insert_field (self->fields[0], code, info->addr.base_regno, 0);
771 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
772 return NULL;
773 }
774
775 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
776 <shift> is SELF's operand-dependent value. fields[0] specifies the
777 base register field, fields[1] specifies the offset register field and
778 fields[2] is a single-bit field that selects SXTW over UXTW. */
779 const char *
780 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
781 const aarch64_opnd_info *info, aarch64_insn *code,
782 const aarch64_inst *inst ATTRIBUTE_UNUSED)
783 {
784 insert_field (self->fields[0], code, info->addr.base_regno, 0);
785 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
786 if (info->shifter.kind == AARCH64_MOD_UXTW)
787 insert_field (self->fields[2], code, 0, 0);
788 else
789 insert_field (self->fields[2], code, 1, 0);
790 return NULL;
791 }
792
793 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
794 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
795 fields[0] specifies the base register field. */
796 const char *
797 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
798 const aarch64_opnd_info *info, aarch64_insn *code,
799 const aarch64_inst *inst ATTRIBUTE_UNUSED)
800 {
801 int factor = 1 << get_operand_specific_data (self);
802 insert_field (self->fields[0], code, info->addr.base_regno, 0);
803 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
804 return NULL;
805 }
806
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is fixed by the instruction and where <msz> is a
   2-bit unsigned number.  fields[0] specifies the base register field
   and fields[1] specifies the offset register field.

   NOTE(review): despite the "ext" prefix this is an inserter, shared by
   the aarch64_ins_sve_addr_zz_* entry points below; a name such as
   aarch64_ins_sve_addr_zz would better match this file's convention.  */
static const char *
aarch64_ext_sve_addr_zz (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code)
{
  /* Zn (base), Zm (offset) and the shift/extend amount in msz.  */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
  return NULL;
}
820
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
const char *
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* The LSL modifier is implied by the opcode; only the registers and
     shift amount need encoding.  */
  return aarch64_ext_sve_addr_zz (self, info, code);
}
831
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
const char *
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* The SXTW modifier is implied by the opcode; only the registers and
     shift amount need encoding.  */
  return aarch64_ext_sve_addr_zz (self, info, code);
}
843
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
const char *
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* The UXTW modifier is implied by the opcode; only the registers and
     shift amount need encoding.  */
  return aarch64_ext_sve_addr_zz (self, info, code);
}
855
856 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
857 array specifies which field to use for Zn. MM is encoded in the
858 concatenation of imm5 and SVE_tszh, with imm5 being the less
859 significant part. */
860 const char *
861 aarch64_ins_sve_index (const aarch64_operand *self,
862 const aarch64_opnd_info *info, aarch64_insn *code,
863 const aarch64_inst *inst ATTRIBUTE_UNUSED)
864 {
865 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
866 insert_field (self->fields[0], code, info->reglane.regno, 0);
867 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
868 2, FLD_imm5, FLD_SVE_tszh);
869 return NULL;
870 }
871
872 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
873 to use for Zn. */
874 const char *
875 aarch64_ins_sve_reglist (const aarch64_operand *self,
876 const aarch64_opnd_info *info, aarch64_insn *code,
877 const aarch64_inst *inst ATTRIBUTE_UNUSED)
878 {
879 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
880 return NULL;
881 }
882
883 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
884 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
885 field. */
886 const char *
887 aarch64_ins_sve_scale (const aarch64_operand *self,
888 const aarch64_opnd_info *info, aarch64_insn *code,
889 const aarch64_inst *inst ATTRIBUTE_UNUSED)
890 {
891 insert_all_fields (self, code, info->imm.value);
892 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
893 return NULL;
894 }
895
896 /* Miscellaneous encoding functions. */
897
/* Encode size[0], i.e. bit 22, for
   e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier;

  /* Pick the operand holding the wider format, from which bit 22 is
     derived.  */
  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>: the wider format is the source.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>: the wider format is the
	 destination.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      assert (0);
    }
  /* Bit 22 is 0 when the wider format is 4S and 1 when it is 2D.  */
  assert (qualifier == AARCH64_OPND_QLF_V_4S
	  || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}
929
930 /* Encode size[0], i.e. bit 22, for
931 e.g. FCVTXN <Vb><d>, <Va><n>. */
932
933 static void
934 encode_asisd_fcvtxn (aarch64_inst *inst)
935 {
936 aarch64_insn val = 1;
937 aarch64_field field = {0, 0};
938 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
939 gen_sub_field (FLD_size, 0, 1, &field);
940 insert_field_2 (&field, &inst->value, val, 0);
941 }
942
943 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
944 static void
945 encode_fcvt (aarch64_inst *inst)
946 {
947 aarch64_insn val;
948 const aarch64_field field = {15, 2};
949
950 /* opc dstsize */
951 switch (inst->operands[0].qualifier)
952 {
953 case AARCH64_OPND_QLF_S_S: val = 0; break;
954 case AARCH64_OPND_QLF_S_D: val = 1; break;
955 case AARCH64_OPND_QLF_S_H: val = 3; break;
956 default: abort ();
957 }
958 insert_field_2 (&field, &inst->value, val, 0);
959
960 return;
961 }
962
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  /* Dispatch on the opcode's OP enum; instructions not listed here need
     no extra encoding work.  */
  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      /* Scalar FCVTXN.  */
      encode_asisd_fcvtxn (inst);
      break;
    default: break;
    }
}
986
987 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
988 static void
989 encode_sizeq (aarch64_inst *inst)
990 {
991 aarch64_insn sizeq;
992 enum aarch64_field_kind kind;
993 int idx;
994
995 /* Get the index of the operand whose information we are going to use
996 to encode the size and Q fields.
997 This is deduced from the possible valid qualifier lists. */
998 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
999 DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
1000 aarch64_get_qualifier_name (inst->operands[idx].qualifier));
1001 sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
1002 /* Q */
1003 insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
1004 /* size */
1005 if (inst->opcode->iclass == asisdlse
1006 || inst->opcode->iclass == asisdlsep
1007 || inst->opcode->iclass == asisdlso
1008 || inst->opcode->iclass == asisdlsop)
1009 kind = FLD_vldst_size;
1010 else
1011 kind = FLD_size;
1012 insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
1013 }
1014
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has the enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  /* 'sf' bit: 0 for a 32-bit (W) operation, 1 for 64-bit (X/SP).  */
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      /* Some opcodes keep an 'N' bit that must match 'sf'.  */
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  /* Large System Extension atomics encode the W/X distinction in a
     dedicated size field instead of 'sf'.  */
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  /* FP 'type' field: scalar element size of the selected operand.  */
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	/* Value 2 is unallocated; half-precision encodes as 3.  */
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: assert (0);
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  /* AdvSIMD scalar 'size' field.  */
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  /* Vector arrangement <T> encoded in Q and a one-hot sub-field of imm5.  */
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      /* The standard value's upper bits select which one-hot bit of
	 imm5<3:0> to set; see the table above.  */
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  /* GP register size encoded in the Q bit.  */
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be a integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Load-signed size encoded in opc<0> (inverted: 1 - standard value).  */
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
1140
1141 /* Converters converting an alias opcode instruction to its real form. */
1142
1143 /* ROR <Wd>, <Ws>, #<shift>
1144 is equivalent to:
1145 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1146 static void
1147 convert_ror_to_extr (aarch64_inst *inst)
1148 {
1149 copy_operand_info (inst, 3, 2);
1150 copy_operand_info (inst, 2, 1);
1151 }
1152
1153 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1154 is equivalent to:
1155 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1156 static void
1157 convert_xtl_to_shll (aarch64_inst *inst)
1158 {
1159 inst->operands[2].qualifier = inst->operands[1].qualifier;
1160 inst->operands[2].imm.value = 0;
1161 }
1162
1163 /* Convert
1164 LSR <Xd>, <Xn>, #<shift>
1165 to
1166 UBFM <Xd>, <Xn>, #<shift>, #63. */
1167 static void
1168 convert_sr_to_bfm (aarch64_inst *inst)
1169 {
1170 inst->operands[3].imm.value =
1171 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1172 }
1173
/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
     is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.
     Duplicate the single source register into ORR's second source
     operand.  */
  copy_operand_info (inst, 2, 1);
}
1183
1184 /* When <imms> >= <immr>, the instruction written:
1185 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1186 is equivalent to:
1187 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1188
1189 static void
1190 convert_bfx_to_bfm (aarch64_inst *inst)
1191 {
1192 int64_t lsb, width;
1193
1194 /* Convert the operand. */
1195 lsb = inst->operands[2].imm.value;
1196 width = inst->operands[3].imm.value;
1197 inst->operands[2].imm.value = lsb;
1198 inst->operands[3].imm.value = lsb + width - 1;
1199 }
1200
1201 /* When <imms> < <immr>, the instruction written:
1202 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1203 is equivalent to:
1204 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1205
1206 static void
1207 convert_bfi_to_bfm (aarch64_inst *inst)
1208 {
1209 int64_t lsb, width;
1210
1211 /* Convert the operand. */
1212 lsb = inst->operands[2].imm.value;
1213 width = inst->operands[3].imm.value;
1214 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1215 {
1216 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1217 inst->operands[3].imm.value = width - 1;
1218 }
1219 else
1220 {
1221 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1222 inst->operands[3].imm.value = width - 1;
1223 }
1224 }
1225
1226 /* The instruction written:
1227 BFC <Xd>, #<lsb>, #<width>
1228 is equivalent to:
1229 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1230
1231 static void
1232 convert_bfc_to_bfm (aarch64_inst *inst)
1233 {
1234 int64_t lsb, width;
1235
1236 /* Insert XZR. */
1237 copy_operand_info (inst, 3, 2);
1238 copy_operand_info (inst, 2, 1);
1239 copy_operand_info (inst, 2, 0);
1240 inst->operands[1].reg.regno = 0x1f;
1241
1242 /* Convert the immedate operand. */
1243 lsb = inst->operands[2].imm.value;
1244 width = inst->operands[3].imm.value;
1245 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1246 {
1247 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1248 inst->operands[3].imm.value = width - 1;
1249 }
1250 else
1251 {
1252 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1253 inst->operands[3].imm.value = width - 1;
1254 }
1255 }
1256
1257 /* The instruction written:
1258 LSL <Xd>, <Xn>, #<shift>
1259 is equivalent to:
1260 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1261
1262 static void
1263 convert_lsl_to_ubfm (aarch64_inst *inst)
1264 {
1265 int64_t shift = inst->operands[2].imm.value;
1266
1267 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1268 {
1269 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1270 inst->operands[3].imm.value = 31 - shift;
1271 }
1272 else
1273 {
1274 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1275 inst->operands[3].imm.value = 63 - shift;
1276 }
1277 }
1278
/* CINC <Wd>, <Wn>, <cond>
   is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */

static void
convert_to_csel (aarch64_inst *inst)
{
  /* Copy right-to-left: move <cond> into operand 3 and duplicate <Wn>
     into operand 2, so nothing is clobbered before it is moved.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  /* The alias acts when <cond> holds, but CSINC/CSINV/CSNEG act on the
     second source when the condition does NOT hold, so invert it.  */
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1290
/* CSET <Wd>, <cond>
   is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  /* Copy right-to-left: <cond> into operand 3, then seed operands 2 and
     1 from operand 0 so they carry register-operand info (qualifier
     etc.) before being forced to WZR/XZR (regno 31).  */
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  /* CSET sets the result when <cond> holds, but CSINC increments when
     it does not, so the condition must be inverted.  */
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1305
1306 /* MOV <Wd>, #<imm>
1307 is equivalent to:
1308 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1309
1310 static void
1311 convert_mov_to_movewide (aarch64_inst *inst)
1312 {
1313 int is32;
1314 uint32_t shift_amount;
1315 uint64_t value;
1316
1317 switch (inst->opcode->op)
1318 {
1319 case OP_MOV_IMM_WIDE:
1320 value = inst->operands[1].imm.value;
1321 break;
1322 case OP_MOV_IMM_WIDEN:
1323 value = ~inst->operands[1].imm.value;
1324 break;
1325 default:
1326 assert (0);
1327 }
1328 inst->operands[1].type = AARCH64_OPND_HALF;
1329 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1330 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1331 /* The constraint check should have guaranteed this wouldn't happen. */
1332 assert (0);
1333 value >>= shift_amount;
1334 value &= 0xffff;
1335 inst->operands[1].imm.value = value;
1336 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1337 inst->operands[1].shifter.amount = shift_amount;
1338 }
1339
/* MOV <Wd>, #<imm>
   is equivalent to:
   ORR <Wd>, WZR, #<imm>.  */

static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  /* Move the bitmask immediate into operand 2 and insert WZR/XZR
     (regno 31) as the ORR source register in operand 1; clear 'skip'
     so the new operand is encoded.  */
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
}
1351
1352 /* Some alias opcodes are assembled by being converted to their real-form. */
1353
1354 static void
1355 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1356 {
1357 const aarch64_opcode *alias = inst->opcode;
1358
1359 if ((alias->flags & F_CONV) == 0)
1360 goto convert_to_real_return;
1361
1362 switch (alias->op)
1363 {
1364 case OP_ASR_IMM:
1365 case OP_LSR_IMM:
1366 convert_sr_to_bfm (inst);
1367 break;
1368 case OP_LSL_IMM:
1369 convert_lsl_to_ubfm (inst);
1370 break;
1371 case OP_CINC:
1372 case OP_CINV:
1373 case OP_CNEG:
1374 convert_to_csel (inst);
1375 break;
1376 case OP_CSET:
1377 case OP_CSETM:
1378 convert_cset_to_csinc (inst);
1379 break;
1380 case OP_UBFX:
1381 case OP_BFXIL:
1382 case OP_SBFX:
1383 convert_bfx_to_bfm (inst);
1384 break;
1385 case OP_SBFIZ:
1386 case OP_BFI:
1387 case OP_UBFIZ:
1388 convert_bfi_to_bfm (inst);
1389 break;
1390 case OP_BFC:
1391 convert_bfc_to_bfm (inst);
1392 break;
1393 case OP_MOV_V:
1394 convert_mov_to_orr (inst);
1395 break;
1396 case OP_MOV_IMM_WIDE:
1397 case OP_MOV_IMM_WIDEN:
1398 convert_mov_to_movewide (inst);
1399 break;
1400 case OP_MOV_IMM_LOG:
1401 convert_mov_to_movebitmask (inst);
1402 break;
1403 case OP_ROR_IMM:
1404 convert_ror_to_extr (inst);
1405 break;
1406 case OP_SXTL:
1407 case OP_SXTL2:
1408 case OP_UXTL:
1409 case OP_UXTL2:
1410 convert_xtl_to_shll (inst);
1411 break;
1412 default:
1413 break;
1414 }
1415
1416 convert_to_real_return:
1417 aarch64_replace_opcode (inst, real);
1418 }
1419
/* Encode *INST_ORI of the opcode code OPCODE.
   Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.
   Return 1 on success; 0 (with *MISMATCH_DETAIL filled in) if the operands
   fail their constraint check.  */

int
aarch64_opcode_encode (const aarch64_opcode *opcode,
		       const aarch64_inst *inst_ori, aarch64_insn *code,
		       aarch64_opnd_qualifier_t *qlf_seq,
		       aarch64_operand_error *mismatch_detail)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return 0;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.
     NOTE(review): each iteration writes to *qlf_seq rather than
     qlf_seq[i], so only the last operand's qualifier survives --
     verify against callers whether qlf_seq[i] was intended.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
	*qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of real opcode
     and the encoding will be carried out using the rules for the aliased
     opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
		   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
      opcode = aliased;
    }

  /* NOTE(review): declaration after statements deviates from the file's
     declare-at-top style -- harmless, but flag for consistency.  */
  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      /* NIL marks the end of the operand list.  */
      if (type == AARCH64_OPND_NIL)
	break;
      /* Operands marked 'skip' were folded away by an alias conversion.  */
      if (info->skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd))
	aarch64_insert_operand (opnd, info, &inst->value, inst);
    }

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

 encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  *code = inst->value;

  return 1;
}