]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - opcodes/aarch64-asm.c
[AArch64][SVE 24/32] Add AARCH64_OPND_SVE_PATTERN_SCALED
[thirdparty/binutils-gdb.git] / opcodes / aarch64-asm.c
CommitLineData
a06ea964 1/* aarch64-asm.c -- AArch64 assembler support.
6f2750fe 2 Copyright (C) 2012-2016 Free Software Foundation, Inc.
a06ea964
NC
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21#include "sysdep.h"
22#include <stdarg.h>
b5464a68 23#include "libiberty.h"
a06ea964
NC
24#include "aarch64-asm.h"
25
26/* Utilities. */
27
28/* The unnamed arguments consist of the number of fields and information about
29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
30 the base mask of the opcode.
31
32 N.B. the fields are required to be in such an order than the least signficant
33 field for VALUE comes the first, e.g. the <index> in
34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
9aff4b7a 35 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
a06ea964
NC
36 the order of M, L, H. */
37
static inline void
insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
{
  uint32_t num;
  const aarch64_field *field;
  enum aarch64_field_kind kind;
  va_list va;

  va_start (va, mask);
  /* First variadic argument: how many field descriptors follow (at most 5).  */
  num = va_arg (va, uint32_t);
  assert (num <= 5);
  while (num--)
    {
      /* Fields are passed least-significant-part first, so consume VALUE
	 from the bottom up, shifting out each field's width once it has
	 been inserted.  */
      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      insert_field (kind, code, value, mask);
      value >>= field->width;
    }
  va_end (va);
}
58
b5464a68
RS
59/* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
61
static void
insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
		   aarch64_insn value)
{
  unsigned int i;
  enum aarch64_field_kind kind;

  /* Walk SELF->fields backwards so that the final (non-FLD_NIL) field
     receives the least significant bits of VALUE.  */
  for (i = ARRAY_SIZE (self->fields); i-- > 0; )
    if (self->fields[i] != FLD_NIL)
      {
	kind = self->fields[i];
	insert_field (kind, code, value, 0);
	value >>= fields[kind].width;
      }
}
77
a06ea964
NC
78/* Operand inserters. */
79
80/* Insert register number. */
const char *
aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
		   aarch64_insn *code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* The register number goes into the operand's single field.  */
  insert_field (self->fields[0], code, info->reg.regno, 0);
  return NULL;
}
89
90/* Insert register number, index and/or other data for SIMD register element
91 operand, e.g. the last source operand in
92 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
const char *
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      /* POS selects which bit of imm5/imm4 carries the low index bit;
	 it also determines the element size marker below.  */
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D  */
	  /* The low set bit marks the element size; the index occupies the
	     bits above it.  */
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* H:L:M */
	  insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* H:L */
	  insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  insert_field (FLD_H, code, info->reglane.index, 0);
	  break;
	default:
	  assert (0);
	}
    }
  return NULL;
}
148
149/* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
const char *
aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code,
		     const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* R: first register in the list.  */
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  /* len: number of registers, encoded as count - 1.  */
  insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
  return NULL;
}
161
162/* Insert Rt and opcode fields for a register list operand, e.g. Vt
163 in AdvSIMD load/store instructions. */
const char *
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst)
{
  aarch64_insn value = 0;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* opcode: selected by both the structure arity NUM and, for LD1/ST1,
     the number of registers in the list.  */
  switch (num)
    {
    case 1:
      switch (info->reglist.num_regs)
	{
	case 1: value = 0x7; break;
	case 2: value = 0xa; break;
	case 3: value = 0x6; break;
	case 4: value = 0x2; break;
	default: assert (0);
	}
      break;
    case 2:
      /* LD2/ST2 of a 4-register list means "two consecutive pairs".  */
      value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
      break;
    case 3:
      value = 0x4;
      break;
    case 4:
      value = 0x0;
      break;
    default:
      assert (0);
    }
  insert_field (FLD_opcode, code, value, 0);

  return NULL;
}
204
205/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
206 single structure to all lanes instructions. */
207const char *
208aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
209 const aarch64_opnd_info *info, aarch64_insn *code,
210 const aarch64_inst *inst)
211{
212 aarch64_insn value;
213 /* The opcode dependent area stores the number of elements in
214 each structure to be loaded/stored. */
215 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
216
217 /* Rt */
218 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
219 /* S */
220 value = (aarch64_insn) 0;
221 if (is_ld1r && info->reglist.num_regs == 2)
222 /* OP_LD1R does not have alternating variant, but have "two consecutive"
223 instead. */
224 value = (aarch64_insn) 1;
225 insert_field (FLD_S, code, value, 0);
226
227 return NULL;
228}
229
230/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
231 operand e.g. Vt in AdvSIMD load/store single element instructions. */
const char *
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  The wider the element, the
     fewer index bits are needed and the more of Q:S:size is repurposed.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q"; size<0> is fixed to 1 for doubleword.  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      assert (0);
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return NULL;
}
277
278/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
279 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
280 or SSHR <V><d>, <V><n>, #<shift>. */
const char *
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst)
{
  /* VAL holds the standard encoding of the vector/scalar qualifier; its
     low bit distinguishes the 64-bit from the 128-bit arrangement.  */
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return NULL;
}
332
333/* Insert fields for e.g. the immediate operands in
334 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
335const char *
336aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
337 aarch64_insn *code,
338 const aarch64_inst *inst ATTRIBUTE_UNUSED)
339{
340 int64_t imm;
a06ea964
NC
341
342 imm = info->imm.value;
343 if (operand_need_shift_by_two (self))
344 imm >>= 2;
b5464a68 345 insert_all_fields (self, code, imm);
a06ea964
NC
346 return NULL;
347}
348
349/* Insert immediate and its shift amount for e.g. the last operand in
350 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
const char *
aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
		      aarch64_insn *code, const aarch64_inst *inst)
{
  /* imm16 */
  aarch64_ins_imm (self, info, code, inst);
  /* hw: the LSL amount is a multiple of 16, encoded as amount / 16.  */
  insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
  return NULL;
}
361
362/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
363 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
const char *
aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
				  const aarch64_opnd_info *info,
				  aarch64_insn *code,
				  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  uint64_t imm = info->imm.value;
  enum aarch64_modifier_kind kind = info->shifter.kind;
  int amount = info->shifter.amount;
  aarch64_field field = {0, 0};

  /* a:b:c:d:e:f:g:h */
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    {
      /* Either MOVI <Dd>, #<imm>
	 or     MOVI <Vd>.2D, #<imm>.
	 <imm> is a 64-bit immediate
	 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
	 encoded in "a:b:c:d:e:f:g:h".  */
      imm = aarch64_shrink_expanded_imm8 (imm);
      /* The constraint checker guarantees the immediate is shrinkable.  */
      assert ((int)imm >= 0);
    }
  insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);

  if (kind == AARCH64_MOD_NONE)
    return NULL;

  /* shift amount partially in cmode */
  assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
  if (kind == AARCH64_MOD_LSL)
    {
      /* AARCH64_MOD_LSL: shift zeros.  */
      int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
      assert (esize == 4 || esize == 2 || esize == 1);
      /* For 8-bit move immediate, the optional LSL #0 does not require
	 encoding.  */
      if (esize == 1)
	return NULL;
      /* The shift is a multiple of 8; encode amount / 8 in cmode.  */
      amount >>= 3;
      if (esize == 4)
	gen_sub_field (FLD_cmode, 1, 2, &field);	/* per word */
      else
	gen_sub_field (FLD_cmode, 1, 1, &field);	/* per halfword */
    }
  else
    {
      /* AARCH64_MOD_MSL: shift ones.  The amount is 8 or 16, encoded as
	 amount / 16 in cmode<0>.  */
      amount >>= 4;
      gen_sub_field (FLD_cmode, 0, 1, &field);	/* per word */
    }
  insert_field_2 (&field, code, amount, 0);

  return NULL;
}
419
420/* Insert fields for an 8-bit floating-point immediate. */
const char *
aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
		   aarch64_insn *code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* The 8-bit encoded form is precomputed in info->imm.value.  */
  insert_all_fields (self, code, info->imm.value);
  return NULL;
}
429
430/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
431 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
const char *
aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
		   aarch64_insn *code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* <fbits> is encoded as 64 - <fbits> in the scale field.  */
  insert_field (self->fields[0], code, 64 - info->imm.value, 0);
  return NULL;
}
440
441/* Insert arithmetic immediate for e.g. the last operand in
442 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
443const char *
444aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
445 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
446{
447 /* shift */
448 aarch64_insn value = info->shifter.amount ? 1 : 0;
449 insert_field (self->fields[0], code, value, 0);
450 /* imm12 (unsigned) */
451 insert_field (self->fields[1], code, info->imm.value, 0);
452 return NULL;
453}
454
455/* Insert logical/bitmask immediate for e.g. the last operand in
456 ORR <Wd|WSP>, <Wn>, #<imm>. */
const char *
aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
		  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_insn value;
  uint64_t imm = info->imm.value;
  int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);

  /* BIC is the alias that complements the immediate before encoding.  */
  if (inst->opcode->op == OP_BIC)
    imm = ~imm;
  if (aarch64_logical_immediate_p (imm, esize, &value) == FALSE)
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);

  /* VALUE packs N:immr:imms; fields[0..2] are ordered most significant
     first, so pass them to insert_fields in reverse.  */
  insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
		 self->fields[0]);
  return NULL;
}
475
476/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
477 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
const char *
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
		aarch64_insn *code, const aarch64_inst *inst)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size: pair/literal forms use a 2-bit size field only.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_Q: value = 2; break;
	default: assert (0);
	}
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size — other load/store forms spread the qualifier's
	 standard value over opc<1> and size.  */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }

  return NULL;
}
512
513/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
const char *
aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* Rn: base register only; no offset is encoded.  */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  return NULL;
}
523
524/* Encode the address operand for e.g.
525 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
const char *
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
       S	<amount>
       0	[absent]
       1	#0
       Must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return NULL;
}
556
557/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
const char *
aarch64_ins_addr_simm (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7)
    /* scaled immediate in ld/st pair instructions.  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post- index */
  if (info->addr.writeback)
    {
      /* These iclasses never take writeback addressing.  */
      assert (inst->opcode->iclass != ldst_unscaled
	      && inst->opcode->iclass != ldstnapair_offs
	      && inst->opcode->iclass != ldstpair_off
	      && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      /* fields[1] distinguishes pre-index (set) from post-index (clear).  */
      if (info->addr.preind)
	insert_field (self->fields[1], code, 1, 0);
    }

  return NULL;
}
588
589/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
590const char *
591aarch64_ins_addr_uimm12 (const aarch64_operand *self,
592 const aarch64_opnd_info *info,
593 aarch64_insn *code,
594 const aarch64_inst *inst ATTRIBUTE_UNUSED)
595{
596 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
597
598 /* Rn */
599 insert_field (self->fields[0], code, info->addr.base_regno, 0);
600 /* uimm12 */
601 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
602 return NULL;
603}
604
605/* Encode the address operand for e.g.
606 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
607const char *
608aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
609 const aarch64_opnd_info *info, aarch64_insn *code,
610 const aarch64_inst *inst ATTRIBUTE_UNUSED)
611{
612 /* Rn */
613 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
614 /* Rm | #<amount> */
615 if (info->addr.offset.is_reg)
616 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
617 else
618 insert_field (FLD_Rm, code, 0x1f, 0);
619 return NULL;
620}
621
622/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
const char *
aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  const aarch64_opnd_info *info, aarch64_insn *code,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* cond */
  insert_field (FLD_cond, code, info->cond->value, 0);
  return NULL;
}
632
633/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
const char *
aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
		    const aarch64_opnd_info *info, aarch64_insn *code,
		    const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* op0:op1:CRn:CRm:op2 — the packed system-register number, split over
     five fields, least significant (op2) first.  */
  insert_fields (code, info->sysreg, inst->opcode->mask, 5,
		 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
  return NULL;
}
644
645/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
const char *
aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* op1:op2 */
  insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
		 FLD_op2, FLD_op1);
  return NULL;
}
656
657/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
const char *
aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* op1:CRn:CRm:op2 */
  insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
		 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
  return NULL;
}
668
669/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
670
const char *
aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
		     const aarch64_opnd_info *info, aarch64_insn *code,
		     const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* CRm: the barrier option (or raw #<imm>).  */
  insert_field (FLD_CRm, code, info->barrier->value, 0);
  return NULL;
}
680
681/* Encode the prefetch operation option operand for e.g.
682 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
683
const char *
aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
		   const aarch64_opnd_info *info, aarch64_insn *code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* prfop in Rt: prefetch operations reuse the Rt field.  */
  insert_field (FLD_Rt, code, info->prfop->value, 0);
  return NULL;
}
693
9ed608f9
MW
694/* Encode the hint number for instructions that alias HINT but take an
695 operand. */
696
const char *
aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  const aarch64_opnd_info *info, aarch64_insn *code,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* CRm:op2.  */
  insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
  return NULL;
}
706
a06ea964
NC
707/* Encode the extended register operand for e.g.
708 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
const char *
aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  enum aarch64_modifier_kind kind;

  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* option: LSL is an alias for UXTW/UXTX depending on register width.  */
  kind = info->shifter.kind;
  if (kind == AARCH64_MOD_LSL)
    kind = info->qualifier == AARCH64_OPND_QLF_W
      ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* imm3 */
  insert_field (FLD_imm3, code, info->shifter.amount, 0);

  return NULL;
}
729
730/* Encode the shifted register operand for e.g.
731 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
const char *
aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* shift */
  insert_field (FLD_shift, code,
		aarch64_get_operand_modifier_value (info->shifter.kind), 0);
  /* imm6 */
  insert_field (FLD_imm6, code, info->shifter.amount, 0);

  return NULL;
}
747
f11ad6bc
RS
748/* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
749 array specifies which field to use for Zn. MM is encoded in the
750 concatenation of imm5 and SVE_tszh, with imm5 being the less
751 significant part. */
const char *
aarch64_ins_sve_index (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
  insert_field (self->fields[0], code, info->reglane.regno, 0);
  /* Triangular encoding: (index * 2 + 1) * esize places the element-size
     marker bit below the index bits.  */
  insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
		 2, FLD_imm5, FLD_SVE_tszh);
  return NULL;
}
763
764/* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
765 to use for Zn. */
const char *
aarch64_ins_sve_reglist (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* Only the first register is encoded; the list length is implied by
     the opcode.  */
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  return NULL;
}
774
2442d846
RS
775/* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
776 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
777 field. */
const char *
aarch64_ins_sve_scale (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* <pattern> goes into SELF->fields; MUL #<amount> is encoded as
     amount - 1 in SVE_imm4.  */
  insert_all_fields (self, code, info->imm.value);
  insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
  return NULL;
}
787
a06ea964
NC
788/* Miscellaneous encoding functions. */
789
790/* Encode size[0], i.e. bit 22, for
791 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
792
static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier;

  /* Pick whichever operand carries the wide (<Ta>) arrangement.  */
  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      assert (0);
    }
  assert (qualifier == AARCH64_OPND_QLF_V_4S
	  || qualifier == AARCH64_OPND_QLF_V_2D);
  /* size<0>: 0 for the 4S (single) form, 1 for the 2D (double) form.  */
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}
821
822/* Encode size[0], i.e. bit 22, for
823 e.g. FCVTXN <Vb><d>, <Va><n>. */
824
static void
encode_asisd_fcvtxn (aarch64_inst *inst)
{
  aarch64_insn val = 1;
  aarch64_field field = {0, 0};
  /* FCVTXN only converts double to single, so size<0> is always 1.  */
  assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, val, 0);
}
834
835/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
static void
encode_fcvt (aarch64_inst *inst)
{
  aarch64_insn val;
  /* 'opc' occupies bits <16:15>.  */
  const aarch64_field field = {15, 2};

  /* opc dstsize — derived from the destination operand's size.  */
  switch (inst->operands[0].qualifier)
    {
    case AARCH64_OPND_QLF_S_S: val = 0; break;
    case AARCH64_OPND_QLF_S_D: val = 1; break;
    case AARCH64_OPND_QLF_S_H: val = 3; break;
    default: abort ();
    }
  insert_field_2 (&field, &inst->value, val, 0);

  return;
}
854
855/* Do miscellaneous encodings that are not common enough to be driven by
856 flags. */
857
static void
do_misc_encoding (aarch64_inst *inst)
{
  /* Dispatch the opcode-specific fixups that are too rare to deserve
     their own F_* flag.  */
  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    default: break;
    }
}
878
879/* Encode the 'size' and 'Q' field for e.g. SHADD. */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  /* The standard value packs size:Q, Q in the low bit.  */
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size — AdvSIMD load/store iclasses use a differently-placed field.  */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
906
907/* Opcodes that have fields shared by multiple operands are usually flagged
908 with flags. In this function, we detect such flags and use the
909 information in one of the related operands to do the encoding. The 'one'
910 operand is not any operand but one of the operands that has the enough
911 information for such an encoding. */
912
static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  if (inst->opcode->flags & F_SF)
    {
      /* sf: 1 for a 64-bit (X/SP) operand, 0 for 32-bit.  */
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_LSE_SZ)
    {
      /* LSE atomics reuse the sf-style width selection in a different
	 field position.  */
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: assert (0);
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      /* The element size selects which single bit of imm5<3:0> is set.  */
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be a integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].
	 opc<0> is the inverse of the destination width bit.  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
1032
1033/* Converters converting an alias opcode instruction to its real form. */
1034
1035/* ROR <Wd>, <Ws>, #<shift>
1036 is equivalent to:
1037 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1038static void
1039convert_ror_to_extr (aarch64_inst *inst)
1040{
1041 copy_operand_info (inst, 3, 2);
1042 copy_operand_info (inst, 2, 1);
1043}
1044
e30181a5
YZ
1045/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1046 is equivalent to:
1047 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1048static void
1049convert_xtl_to_shll (aarch64_inst *inst)
1050{
1051 inst->operands[2].qualifier = inst->operands[1].qualifier;
1052 inst->operands[2].imm.value = 0;
1053}
1054
a06ea964
NC
1055/* Convert
1056 LSR <Xd>, <Xn>, #<shift>
1057 to
1058 UBFM <Xd>, <Xn>, #<shift>, #63. */
1059static void
1060convert_sr_to_bfm (aarch64_inst *inst)
1061{
1062 inst->operands[3].imm.value =
1063 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1064}
1065
1066/* Convert MOV to ORR. */
1067static void
1068convert_mov_to_orr (aarch64_inst *inst)
1069{
1070 /* MOV <Vd>.<T>, <Vn>.<T>
1071 is equivalent to:
1072 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1073 copy_operand_info (inst, 2, 1);
1074}
1075
1076/* When <imms> >= <immr>, the instruction written:
1077 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1078 is equivalent to:
1079 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1080
1081static void
1082convert_bfx_to_bfm (aarch64_inst *inst)
1083{
1084 int64_t lsb, width;
1085
1086 /* Convert the operand. */
1087 lsb = inst->operands[2].imm.value;
1088 width = inst->operands[3].imm.value;
1089 inst->operands[2].imm.value = lsb;
1090 inst->operands[3].imm.value = lsb + width - 1;
1091}
1092
1093/* When <imms> < <immr>, the instruction written:
1094 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1095 is equivalent to:
1096 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1097
1098static void
1099convert_bfi_to_bfm (aarch64_inst *inst)
1100{
1101 int64_t lsb, width;
1102
1103 /* Convert the operand. */
1104 lsb = inst->operands[2].imm.value;
1105 width = inst->operands[3].imm.value;
1106 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1107 {
1108 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1109 inst->operands[3].imm.value = width - 1;
1110 }
1111 else
1112 {
1113 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1114 inst->operands[3].imm.value = width - 1;
1115 }
1116}
1117
d685192a
MW
1118/* The instruction written:
1119 BFC <Xd>, #<lsb>, #<width>
1120 is equivalent to:
1121 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1122
1123static void
1124convert_bfc_to_bfm (aarch64_inst *inst)
1125{
1126 int64_t lsb, width;
1127
1128 /* Insert XZR. */
1129 copy_operand_info (inst, 3, 2);
1130 copy_operand_info (inst, 2, 1);
1131 copy_operand_info (inst, 2, 0);
1132 inst->operands[1].reg.regno = 0x1f;
1133
1134 /* Convert the immedate operand. */
1135 lsb = inst->operands[2].imm.value;
1136 width = inst->operands[3].imm.value;
1137 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1138 {
1139 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1140 inst->operands[3].imm.value = width - 1;
1141 }
1142 else
1143 {
1144 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1145 inst->operands[3].imm.value = width - 1;
1146 }
1147}
1148
a06ea964
NC
1149/* The instruction written:
1150 LSL <Xd>, <Xn>, #<shift>
1151 is equivalent to:
1152 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1153
1154static void
1155convert_lsl_to_ubfm (aarch64_inst *inst)
1156{
1157 int64_t shift = inst->operands[2].imm.value;
1158
1159 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1160 {
1161 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1162 inst->operands[3].imm.value = 31 - shift;
1163 }
1164 else
1165 {
1166 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1167 inst->operands[3].imm.value = 63 - shift;
1168 }
1169}
1170
1171/* CINC <Wd>, <Wn>, <cond>
1172 is equivalent to:
1173 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1174
1175static void
1176convert_to_csel (aarch64_inst *inst)
1177{
1178 copy_operand_info (inst, 3, 2);
1179 copy_operand_info (inst, 2, 1);
1180 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1181}
1182
1183/* CSET <Wd>, <cond>
1184 is equivalent to:
1185 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1186
1187static void
1188convert_cset_to_csinc (aarch64_inst *inst)
1189{
1190 copy_operand_info (inst, 3, 1);
1191 copy_operand_info (inst, 2, 0);
1192 copy_operand_info (inst, 1, 0);
1193 inst->operands[1].reg.regno = 0x1f;
1194 inst->operands[2].reg.regno = 0x1f;
1195 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1196}
1197
1198/* MOV <Wd>, #<imm>
1199 is equivalent to:
1200 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1201
1202static void
1203convert_mov_to_movewide (aarch64_inst *inst)
1204{
1205 int is32;
1206 uint32_t shift_amount;
1207 uint64_t value;
1208
1209 switch (inst->opcode->op)
1210 {
1211 case OP_MOV_IMM_WIDE:
1212 value = inst->operands[1].imm.value;
1213 break;
1214 case OP_MOV_IMM_WIDEN:
1215 value = ~inst->operands[1].imm.value;
1216 break;
1217 default:
1218 assert (0);
1219 }
1220 inst->operands[1].type = AARCH64_OPND_HALF;
1221 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
062f38fa
RE
1222 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1223 /* The constraint check should have guaranteed this wouldn't happen. */
1224 assert (0);
a06ea964
NC
1225 value >>= shift_amount;
1226 value &= 0xffff;
1227 inst->operands[1].imm.value = value;
1228 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1229 inst->operands[1].shifter.amount = shift_amount;
1230}
1231
1232/* MOV <Wd>, #<imm>
1233 is equivalent to:
1234 ORR <Wd>, WZR, #<imm>. */
1235
1236static void
1237convert_mov_to_movebitmask (aarch64_inst *inst)
1238{
1239 copy_operand_info (inst, 2, 1);
1240 inst->operands[1].reg.regno = 0x1f;
1241 inst->operands[1].skip = 0;
1242}
1243
1244/* Some alias opcodes are assembled by being converted to their real-form. */
1245
1246static void
1247convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1248{
1249 const aarch64_opcode *alias = inst->opcode;
1250
1251 if ((alias->flags & F_CONV) == 0)
1252 goto convert_to_real_return;
1253
1254 switch (alias->op)
1255 {
1256 case OP_ASR_IMM:
1257 case OP_LSR_IMM:
1258 convert_sr_to_bfm (inst);
1259 break;
1260 case OP_LSL_IMM:
1261 convert_lsl_to_ubfm (inst);
1262 break;
1263 case OP_CINC:
1264 case OP_CINV:
1265 case OP_CNEG:
1266 convert_to_csel (inst);
1267 break;
1268 case OP_CSET:
1269 case OP_CSETM:
1270 convert_cset_to_csinc (inst);
1271 break;
1272 case OP_UBFX:
1273 case OP_BFXIL:
1274 case OP_SBFX:
1275 convert_bfx_to_bfm (inst);
1276 break;
1277 case OP_SBFIZ:
1278 case OP_BFI:
1279 case OP_UBFIZ:
1280 convert_bfi_to_bfm (inst);
1281 break;
d685192a
MW
1282 case OP_BFC:
1283 convert_bfc_to_bfm (inst);
1284 break;
a06ea964
NC
1285 case OP_MOV_V:
1286 convert_mov_to_orr (inst);
1287 break;
1288 case OP_MOV_IMM_WIDE:
1289 case OP_MOV_IMM_WIDEN:
1290 convert_mov_to_movewide (inst);
1291 break;
1292 case OP_MOV_IMM_LOG:
1293 convert_mov_to_movebitmask (inst);
1294 break;
1295 case OP_ROR_IMM:
1296 convert_ror_to_extr (inst);
1297 break;
e30181a5
YZ
1298 case OP_SXTL:
1299 case OP_SXTL2:
1300 case OP_UXTL:
1301 case OP_UXTL2:
1302 convert_xtl_to_shll (inst);
1303 break;
a06ea964
NC
1304 default:
1305 break;
1306 }
1307
1308convert_to_real_return:
1309 aarch64_replace_opcode (inst, real);
1310}
1311
1312/* Encode *INST_ORI of the opcode code OPCODE.
1313 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1314 matched operand qualifier sequence in *QLF_SEQ. */
1315
1316int
1317aarch64_opcode_encode (const aarch64_opcode *opcode,
1318 const aarch64_inst *inst_ori, aarch64_insn *code,
1319 aarch64_opnd_qualifier_t *qlf_seq,
1320 aarch64_operand_error *mismatch_detail)
1321{
1322 int i;
1323 const aarch64_opcode *aliased;
1324 aarch64_inst copy, *inst;
1325
1326 DEBUG_TRACE ("enter with %s", opcode->name);
1327
1328 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1329 copy = *inst_ori;
1330 inst = &copy;
1331
1332 assert (inst->opcode == NULL || inst->opcode == opcode);
1333 if (inst->opcode == NULL)
1334 inst->opcode = opcode;
1335
1336 /* Constrain the operands.
1337 After passing this, the encoding is guaranteed to succeed. */
1338 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
1339 {
1340 DEBUG_TRACE ("FAIL since operand constraint not met");
1341 return 0;
1342 }
1343
1344 /* Get the base value.
1345 Note: this has to be before the aliasing handling below in order to
1346 get the base value from the alias opcode before we move on to the
1347 aliased opcode for encoding. */
1348 inst->value = opcode->opcode;
1349
1350 /* No need to do anything else if the opcode does not have any operand. */
1351 if (aarch64_num_of_operands (opcode) == 0)
1352 goto encoding_exit;
1353
1354 /* Assign operand indexes and check types. Also put the matched
1355 operand qualifiers in *QLF_SEQ to return. */
1356 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1357 {
1358 assert (opcode->operands[i] == inst->operands[i].type);
1359 inst->operands[i].idx = i;
1360 if (qlf_seq != NULL)
1361 *qlf_seq = inst->operands[i].qualifier;
1362 }
1363
1364 aliased = aarch64_find_real_opcode (opcode);
1365 /* If the opcode is an alias and it does not ask for direct encoding by
1366 itself, the instruction will be transformed to the form of real opcode
1367 and the encoding will be carried out using the rules for the aliased
1368 opcode. */
1369 if (aliased != NULL && (opcode->flags & F_CONV))
1370 {
1371 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1372 aliased->name, opcode->name);
1373 /* Convert the operands to the form of the real opcode. */
1374 convert_to_real (inst, aliased);
1375 opcode = aliased;
1376 }
1377
1378 aarch64_opnd_info *info = inst->operands;
1379
1380 /* Call the inserter of each operand. */
1381 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
1382 {
1383 const aarch64_operand *opnd;
1384 enum aarch64_opnd type = opcode->operands[i];
1385 if (type == AARCH64_OPND_NIL)
1386 break;
1387 if (info->skip)
1388 {
1389 DEBUG_TRACE ("skip the incomplete operand %d", i);
1390 continue;
1391 }
1392 opnd = &aarch64_operands[type];
1393 if (operand_has_inserter (opnd))
1394 aarch64_insert_operand (opnd, info, &inst->value, inst);
1395 }
1396
1397 /* Call opcode encoders indicated by flags. */
1398 if (opcode_has_special_coder (opcode))
1399 do_special_encoding (inst);
1400
1401encoding_exit:
1402 DEBUG_TRACE ("exit with %s", opcode->name);
1403
1404 *code = inst->value;
1405
1406 return 1;
1407}