]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - opcodes/aarch64-asm.c
[AArch64][SVE 27/32] Add SVE integer immediate operands
[thirdparty/binutils-gdb.git] / opcodes / aarch64-asm.c
CommitLineData
a06ea964 1/* aarch64-asm.c -- AArch64 assembler support.
6f2750fe 2 Copyright (C) 2012-2016 Free Software Foundation, Inc.
a06ea964
NC
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21#include "sysdep.h"
22#include <stdarg.h>
b5464a68 23#include "libiberty.h"
a06ea964
NC
24#include "aarch64-asm.h"
25
26/* Utilities. */
27
28/* The unnamed arguments consist of the number of fields and information about
29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
30 the base mask of the opcode.
31
32 N.B. the fields are required to be in such an order than the least signficant
33 field for VALUE comes the first, e.g. the <index> in
34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
9aff4b7a 35 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
a06ea964
NC
36 the order of M, L, H. */
37
38static inline void
39insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
40{
41 uint32_t num;
42 const aarch64_field *field;
43 enum aarch64_field_kind kind;
44 va_list va;
45
46 va_start (va, mask);
47 num = va_arg (va, uint32_t);
48 assert (num <= 5);
49 while (num--)
50 {
51 kind = va_arg (va, enum aarch64_field_kind);
52 field = &fields[kind];
53 insert_field (kind, code, value, mask);
54 value >>= field->width;
55 }
56 va_end (va);
57}
58
b5464a68
RS
59/* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
61
62static void
63insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
64 aarch64_insn value)
65{
66 unsigned int i;
67 enum aarch64_field_kind kind;
68
69 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
70 if (self->fields[i] != FLD_NIL)
71 {
72 kind = self->fields[i];
73 insert_field (kind, code, value, 0);
74 value >>= fields[kind].width;
75 }
76}
77
a06ea964
NC
78/* Operand inserters. */
79
80/* Insert register number. */
81const char *
82aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
83 aarch64_insn *code,
84 const aarch64_inst *inst ATTRIBUTE_UNUSED)
85{
86 insert_field (self->fields[0], code, info->reg.regno, 0);
87 return NULL;
88}
89
90/* Insert register number, index and/or other data for SIMD register element
91 operand, e.g. the last source operand in
92 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
93const char *
94aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
95 aarch64_insn *code, const aarch64_inst *inst)
96{
97 /* regno */
98 insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
99 /* index and/or type */
100 if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
101 {
102 int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
103 if (info->type == AARCH64_OPND_En
104 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
105 {
106 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
107 assert (info->idx == 1); /* Vn */
108 aarch64_insn value = info->reglane.index << pos;
109 insert_field (FLD_imm4, code, value, 0);
110 }
111 else
112 {
113 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
114 imm5<3:0> <V>
115 0000 RESERVED
116 xxx1 B
117 xx10 H
118 x100 S
119 1000 D */
120 aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
121 insert_field (FLD_imm5, code, value, 0);
122 }
123 }
124 else
125 {
126 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
127 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
128 switch (info->qualifier)
129 {
130 case AARCH64_OPND_QLF_S_H:
131 /* H:L:M */
132 insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
133 break;
134 case AARCH64_OPND_QLF_S_S:
135 /* H:L */
136 insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
137 break;
138 case AARCH64_OPND_QLF_S_D:
139 /* H */
140 insert_field (FLD_H, code, info->reglane.index, 0);
141 break;
142 default:
143 assert (0);
144 }
145 }
146 return NULL;
147}
148
149/* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
150const char *
151aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
152 aarch64_insn *code,
153 const aarch64_inst *inst ATTRIBUTE_UNUSED)
154{
155 /* R */
156 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
157 /* len */
158 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
159 return NULL;
160}
161
162/* Insert Rt and opcode fields for a register list operand, e.g. Vt
163 in AdvSIMD load/store instructions. */
164const char *
165aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
166 const aarch64_opnd_info *info, aarch64_insn *code,
167 const aarch64_inst *inst)
168{
4ad3b7ef 169 aarch64_insn value = 0;
a06ea964
NC
170 /* Number of elements in each structure to be loaded/stored. */
171 unsigned num = get_opcode_dependent_value (inst->opcode);
172
173 /* Rt */
174 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
175 /* opcode */
176 switch (num)
177 {
178 case 1:
179 switch (info->reglist.num_regs)
180 {
181 case 1: value = 0x7; break;
182 case 2: value = 0xa; break;
183 case 3: value = 0x6; break;
184 case 4: value = 0x2; break;
185 default: assert (0);
186 }
187 break;
188 case 2:
189 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
190 break;
191 case 3:
192 value = 0x4;
193 break;
194 case 4:
195 value = 0x0;
196 break;
197 default:
198 assert (0);
199 }
200 insert_field (FLD_opcode, code, value, 0);
201
202 return NULL;
203}
204
205/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
206 single structure to all lanes instructions. */
207const char *
208aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
209 const aarch64_opnd_info *info, aarch64_insn *code,
210 const aarch64_inst *inst)
211{
212 aarch64_insn value;
213 /* The opcode dependent area stores the number of elements in
214 each structure to be loaded/stored. */
215 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
216
217 /* Rt */
218 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
219 /* S */
220 value = (aarch64_insn) 0;
221 if (is_ld1r && info->reglist.num_regs == 2)
222 /* OP_LD1R does not have alternating variant, but have "two consecutive"
223 instead. */
224 value = (aarch64_insn) 1;
225 insert_field (FLD_S, code, value, 0);
226
227 return NULL;
228}
229
230/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
231 operand e.g. Vt in AdvSIMD load/store single element instructions. */
232const char *
233aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
234 const aarch64_opnd_info *info, aarch64_insn *code,
235 const aarch64_inst *inst ATTRIBUTE_UNUSED)
236{
237 aarch64_field field = {0, 0};
4ad3b7ef
KT
238 aarch64_insn QSsize = 0; /* fields Q:S:size. */
239 aarch64_insn opcodeh2 = 0; /* opcode<2:1> */
a06ea964
NC
240
241 assert (info->reglist.has_index);
242
243 /* Rt */
244 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
245 /* Encode the index, opcode<2:1> and size. */
246 switch (info->qualifier)
247 {
248 case AARCH64_OPND_QLF_S_B:
249 /* Index encoded in "Q:S:size". */
250 QSsize = info->reglist.index;
251 opcodeh2 = 0x0;
252 break;
253 case AARCH64_OPND_QLF_S_H:
254 /* Index encoded in "Q:S:size<1>". */
255 QSsize = info->reglist.index << 1;
256 opcodeh2 = 0x1;
257 break;
258 case AARCH64_OPND_QLF_S_S:
259 /* Index encoded in "Q:S". */
260 QSsize = info->reglist.index << 2;
261 opcodeh2 = 0x2;
262 break;
263 case AARCH64_OPND_QLF_S_D:
264 /* Index encoded in "Q". */
265 QSsize = info->reglist.index << 3 | 0x1;
266 opcodeh2 = 0x2;
267 break;
268 default:
269 assert (0);
270 }
271 insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
272 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
273 insert_field_2 (&field, code, opcodeh2, 0);
274
275 return NULL;
276}
277
278/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
279 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
280 or SSHR <V><d>, <V><n>, #<shift>. */
281const char *
282aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
283 const aarch64_opnd_info *info,
284 aarch64_insn *code, const aarch64_inst *inst)
285{
286 unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
287 aarch64_insn Q, imm;
288
289 if (inst->opcode->iclass == asimdshf)
290 {
291 /* Q
292 immh Q <T>
293 0000 x SEE AdvSIMD modified immediate
294 0001 0 8B
295 0001 1 16B
296 001x 0 4H
297 001x 1 8H
298 01xx 0 2S
299 01xx 1 4S
300 1xxx 0 RESERVED
301 1xxx 1 2D */
302 Q = (val & 0x1) ? 1 : 0;
303 insert_field (FLD_Q, code, Q, inst->opcode->mask);
304 val >>= 1;
305 }
306
307 assert (info->type == AARCH64_OPND_IMM_VLSR
308 || info->type == AARCH64_OPND_IMM_VLSL);
309
310 if (info->type == AARCH64_OPND_IMM_VLSR)
311 /* immh:immb
312 immh <shift>
313 0000 SEE AdvSIMD modified immediate
314 0001 (16-UInt(immh:immb))
315 001x (32-UInt(immh:immb))
316 01xx (64-UInt(immh:immb))
317 1xxx (128-UInt(immh:immb)) */
318 imm = (16 << (unsigned)val) - info->imm.value;
319 else
320 /* immh:immb
321 immh <shift>
322 0000 SEE AdvSIMD modified immediate
323 0001 (UInt(immh:immb)-8)
324 001x (UInt(immh:immb)-16)
325 01xx (UInt(immh:immb)-32)
326 1xxx (UInt(immh:immb)-64) */
327 imm = info->imm.value + (8 << (unsigned)val);
328 insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
329
330 return NULL;
331}
332
333/* Insert fields for e.g. the immediate operands in
334 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
335const char *
336aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
337 aarch64_insn *code,
338 const aarch64_inst *inst ATTRIBUTE_UNUSED)
339{
340 int64_t imm;
a06ea964
NC
341
342 imm = info->imm.value;
343 if (operand_need_shift_by_two (self))
344 imm >>= 2;
b5464a68 345 insert_all_fields (self, code, imm);
a06ea964
NC
346 return NULL;
347}
348
349/* Insert immediate and its shift amount for e.g. the last operand in
350 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
351const char *
352aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
062f38fa 353 aarch64_insn *code, const aarch64_inst *inst)
a06ea964
NC
354{
355 /* imm16 */
356 aarch64_ins_imm (self, info, code, inst);
357 /* hw */
358 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
359 return NULL;
360}
361
362/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
363 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
364const char *
365aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
366 const aarch64_opnd_info *info,
367 aarch64_insn *code,
368 const aarch64_inst *inst ATTRIBUTE_UNUSED)
369{
370 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
371 uint64_t imm = info->imm.value;
372 enum aarch64_modifier_kind kind = info->shifter.kind;
373 int amount = info->shifter.amount;
374 aarch64_field field = {0, 0};
375
376 /* a:b:c:d:e:f:g:h */
377 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
378 {
379 /* Either MOVI <Dd>, #<imm>
380 or MOVI <Vd>.2D, #<imm>.
381 <imm> is a 64-bit immediate
382 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
383 encoded in "a:b:c:d:e:f:g:h". */
384 imm = aarch64_shrink_expanded_imm8 (imm);
385 assert ((int)imm >= 0);
386 }
a06ea964
NC
387 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
388
389 if (kind == AARCH64_MOD_NONE)
390 return NULL;
391
392 /* shift amount partially in cmode */
393 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
394 if (kind == AARCH64_MOD_LSL)
395 {
396 /* AARCH64_MOD_LSL: shift zeros. */
397 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
f5555712
YZ
398 assert (esize == 4 || esize == 2 || esize == 1);
399 /* For 8-bit move immediate, the optional LSL #0 does not require
400 encoding. */
401 if (esize == 1)
402 return NULL;
a06ea964
NC
403 amount >>= 3;
404 if (esize == 4)
405 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
406 else
407 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
408 }
409 else
410 {
411 /* AARCH64_MOD_MSL: shift ones. */
412 amount >>= 4;
413 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
414 }
415 insert_field_2 (&field, code, amount, 0);
416
417 return NULL;
aa2aa4c6
RS
418}
419
420/* Insert fields for an 8-bit floating-point immediate. */
421const char *
422aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
423 aarch64_insn *code,
424 const aarch64_inst *inst ATTRIBUTE_UNUSED)
425{
426 insert_all_fields (self, code, info->imm.value);
427 return NULL;
a06ea964
NC
428}
429
430/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
431 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
432const char *
433aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
434 aarch64_insn *code,
435 const aarch64_inst *inst ATTRIBUTE_UNUSED)
436{
437 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
438 return NULL;
439}
440
441/* Insert arithmetic immediate for e.g. the last operand in
442 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
443const char *
444aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
445 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
446{
447 /* shift */
448 aarch64_insn value = info->shifter.amount ? 1 : 0;
449 insert_field (self->fields[0], code, value, 0);
450 /* imm12 (unsigned) */
451 insert_field (self->fields[1], code, info->imm.value, 0);
452 return NULL;
453}
454
e950b345
RS
455/* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
456 the operand should be inverted before encoding. */
457static const char *
458aarch64_ins_limm_1 (const aarch64_operand *self,
459 const aarch64_opnd_info *info, aarch64_insn *code,
460 const aarch64_inst *inst, bfd_boolean invert_p)
a06ea964
NC
461{
462 aarch64_insn value;
463 uint64_t imm = info->imm.value;
42408347 464 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
a06ea964 465
e950b345 466 if (invert_p)
a06ea964 467 imm = ~imm;
42408347 468 if (aarch64_logical_immediate_p (imm, esize, &value) == FALSE)
a06ea964
NC
469 /* The constraint check should have guaranteed this wouldn't happen. */
470 assert (0);
471
472 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
473 self->fields[0]);
474 return NULL;
475}
476
e950b345
RS
477/* Insert logical/bitmask immediate for e.g. the last operand in
478 ORR <Wd|WSP>, <Wn>, #<imm>. */
479const char *
480aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
481 aarch64_insn *code, const aarch64_inst *inst)
482{
483 return aarch64_ins_limm_1 (self, info, code, inst,
484 inst->opcode->op == OP_BIC);
485}
486
487/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
488const char *
489aarch64_ins_inv_limm (const aarch64_operand *self,
490 const aarch64_opnd_info *info, aarch64_insn *code,
491 const aarch64_inst *inst)
492{
493 return aarch64_ins_limm_1 (self, info, code, inst, TRUE);
494}
495
a06ea964
NC
496/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
497 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
498const char *
499aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
500 aarch64_insn *code, const aarch64_inst *inst)
501{
4ad3b7ef 502 aarch64_insn value = 0;
a06ea964
NC
503
504 assert (info->idx == 0);
505
506 /* Rt */
507 aarch64_ins_regno (self, info, code, inst);
508 if (inst->opcode->iclass == ldstpair_indexed
509 || inst->opcode->iclass == ldstnapair_offs
510 || inst->opcode->iclass == ldstpair_off
511 || inst->opcode->iclass == loadlit)
512 {
513 /* size */
514 switch (info->qualifier)
515 {
516 case AARCH64_OPND_QLF_S_S: value = 0; break;
517 case AARCH64_OPND_QLF_S_D: value = 1; break;
518 case AARCH64_OPND_QLF_S_Q: value = 2; break;
519 default: assert (0);
520 }
521 insert_field (FLD_ldst_size, code, value, 0);
522 }
523 else
524 {
525 /* opc[1]:size */
526 value = aarch64_get_qualifier_standard_value (info->qualifier);
527 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
528 }
529
530 return NULL;
531}
532
533/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
534const char *
535aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
536 const aarch64_opnd_info *info, aarch64_insn *code,
537 const aarch64_inst *inst ATTRIBUTE_UNUSED)
538{
539 /* Rn */
540 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
541 return NULL;
542}
543
544/* Encode the address operand for e.g.
545 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
546const char *
547aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
548 const aarch64_opnd_info *info, aarch64_insn *code,
549 const aarch64_inst *inst ATTRIBUTE_UNUSED)
550{
551 aarch64_insn S;
552 enum aarch64_modifier_kind kind = info->shifter.kind;
553
554 /* Rn */
555 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
556 /* Rm */
557 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
558 /* option */
559 if (kind == AARCH64_MOD_LSL)
560 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven. */
561 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
562 /* S */
563 if (info->qualifier != AARCH64_OPND_QLF_S_B)
564 S = info->shifter.amount != 0;
565 else
566 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
567 S <amount>
568 0 [absent]
569 1 #0
570 Must be #0 if <extend> is explicitly LSL. */
571 S = info->shifter.operator_present && info->shifter.amount_present;
572 insert_field (FLD_S, code, S, 0);
573
574 return NULL;
575}
576
577/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
578const char *
579aarch64_ins_addr_simm (const aarch64_operand *self,
580 const aarch64_opnd_info *info,
062f38fa
RE
581 aarch64_insn *code,
582 const aarch64_inst *inst ATTRIBUTE_UNUSED)
a06ea964
NC
583{
584 int imm;
585
586 /* Rn */
587 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
588 /* simm (imm9 or imm7) */
589 imm = info->addr.offset.imm;
590 if (self->fields[0] == FLD_imm7)
591 /* scaled immediate in ld/st pair instructions.. */
592 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
593 insert_field (self->fields[0], code, imm, 0);
594 /* pre/post- index */
595 if (info->addr.writeback)
596 {
597 assert (inst->opcode->iclass != ldst_unscaled
598 && inst->opcode->iclass != ldstnapair_offs
599 && inst->opcode->iclass != ldstpair_off
600 && inst->opcode->iclass != ldst_unpriv);
601 assert (info->addr.preind != info->addr.postind);
602 if (info->addr.preind)
603 insert_field (self->fields[1], code, 1, 0);
604 }
605
606 return NULL;
607}
608
609/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
610const char *
611aarch64_ins_addr_uimm12 (const aarch64_operand *self,
612 const aarch64_opnd_info *info,
613 aarch64_insn *code,
614 const aarch64_inst *inst ATTRIBUTE_UNUSED)
615{
616 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
617
618 /* Rn */
619 insert_field (self->fields[0], code, info->addr.base_regno, 0);
620 /* uimm12 */
621 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
622 return NULL;
623}
624
625/* Encode the address operand for e.g.
626 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
627const char *
628aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
629 const aarch64_opnd_info *info, aarch64_insn *code,
630 const aarch64_inst *inst ATTRIBUTE_UNUSED)
631{
632 /* Rn */
633 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
634 /* Rm | #<amount> */
635 if (info->addr.offset.is_reg)
636 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
637 else
638 insert_field (FLD_Rm, code, 0x1f, 0);
639 return NULL;
640}
641
642/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
643const char *
644aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
645 const aarch64_opnd_info *info, aarch64_insn *code,
646 const aarch64_inst *inst ATTRIBUTE_UNUSED)
647{
648 /* cond */
649 insert_field (FLD_cond, code, info->cond->value, 0);
650 return NULL;
651}
652
653/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
654const char *
655aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
656 const aarch64_opnd_info *info, aarch64_insn *code,
657 const aarch64_inst *inst ATTRIBUTE_UNUSED)
658{
659 /* op0:op1:CRn:CRm:op2 */
660 insert_fields (code, info->sysreg, inst->opcode->mask, 5,
661 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
662 return NULL;
663}
664
665/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
666const char *
667aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
668 const aarch64_opnd_info *info, aarch64_insn *code,
669 const aarch64_inst *inst ATTRIBUTE_UNUSED)
670{
671 /* op1:op2 */
672 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
673 FLD_op2, FLD_op1);
674 return NULL;
675}
676
677/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
678const char *
679aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
680 const aarch64_opnd_info *info, aarch64_insn *code,
681 const aarch64_inst *inst ATTRIBUTE_UNUSED)
682{
683 /* op1:CRn:CRm:op2 */
684 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
685 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
686 return NULL;
687}
688
689/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
690
691const char *
692aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
693 const aarch64_opnd_info *info, aarch64_insn *code,
694 const aarch64_inst *inst ATTRIBUTE_UNUSED)
695{
696 /* CRm */
697 insert_field (FLD_CRm, code, info->barrier->value, 0);
698 return NULL;
699}
700
701/* Encode the prefetch operation option operand for e.g.
702 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
703
704const char *
705aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
706 const aarch64_opnd_info *info, aarch64_insn *code,
707 const aarch64_inst *inst ATTRIBUTE_UNUSED)
708{
709 /* prfop in Rt */
710 insert_field (FLD_Rt, code, info->prfop->value, 0);
711 return NULL;
712}
713
9ed608f9
MW
714/* Encode the hint number for instructions that alias HINT but take an
715 operand. */
716
717const char *
718aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
719 const aarch64_opnd_info *info, aarch64_insn *code,
720 const aarch64_inst *inst ATTRIBUTE_UNUSED)
721{
722 /* CRm:op2. */
723 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
724 return NULL;
725}
726
a06ea964
NC
727/* Encode the extended register operand for e.g.
728 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
729const char *
730aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
731 const aarch64_opnd_info *info, aarch64_insn *code,
732 const aarch64_inst *inst ATTRIBUTE_UNUSED)
733{
734 enum aarch64_modifier_kind kind;
735
736 /* Rm */
737 insert_field (FLD_Rm, code, info->reg.regno, 0);
738 /* option */
739 kind = info->shifter.kind;
740 if (kind == AARCH64_MOD_LSL)
741 kind = info->qualifier == AARCH64_OPND_QLF_W
742 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
743 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
744 /* imm3 */
745 insert_field (FLD_imm3, code, info->shifter.amount, 0);
746
747 return NULL;
748}
749
750/* Encode the shifted register operand for e.g.
751 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
752const char *
753aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
754 const aarch64_opnd_info *info, aarch64_insn *code,
755 const aarch64_inst *inst ATTRIBUTE_UNUSED)
756{
757 /* Rm */
758 insert_field (FLD_Rm, code, info->reg.regno, 0);
759 /* shift */
760 insert_field (FLD_shift, code,
761 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
762 /* imm6 */
763 insert_field (FLD_imm6, code, info->shifter.amount, 0);
764
765 return NULL;
766}
767
98907a70
RS
768/* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
769 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
770 SELF's operand-dependent value. fields[0] specifies the field that
771 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
772const char *
773aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
774 const aarch64_opnd_info *info,
775 aarch64_insn *code,
776 const aarch64_inst *inst ATTRIBUTE_UNUSED)
777{
778 int factor = 1 + get_operand_specific_data (self);
779 insert_field (self->fields[0], code, info->addr.base_regno, 0);
780 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
781 return NULL;
782}
783
784/* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
785 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
786 SELF's operand-dependent value. fields[0] specifies the field that
787 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
788const char *
789aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
790 const aarch64_opnd_info *info,
791 aarch64_insn *code,
792 const aarch64_inst *inst ATTRIBUTE_UNUSED)
793{
794 int factor = 1 + get_operand_specific_data (self);
795 insert_field (self->fields[0], code, info->addr.base_regno, 0);
796 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
797 return NULL;
798}
799
800/* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
801 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
802 SELF's operand-dependent value. fields[0] specifies the field that
803 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
804 and imm3 fields, with imm3 being the less-significant part. */
805const char *
806aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
807 const aarch64_opnd_info *info,
808 aarch64_insn *code,
809 const aarch64_inst *inst ATTRIBUTE_UNUSED)
810{
811 int factor = 1 + get_operand_specific_data (self);
812 insert_field (self->fields[0], code, info->addr.base_regno, 0);
813 insert_fields (code, info->addr.offset.imm / factor, 0,
814 2, FLD_imm3, FLD_SVE_imm6);
815 return NULL;
816}
817
4df068de
RS
818/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
819 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
820 value. fields[0] specifies the base register field. */
821const char *
822aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
823 const aarch64_opnd_info *info, aarch64_insn *code,
824 const aarch64_inst *inst ATTRIBUTE_UNUSED)
825{
826 int factor = 1 << get_operand_specific_data (self);
827 insert_field (self->fields[0], code, info->addr.base_regno, 0);
828 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
829 return NULL;
830}
831
832/* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
833 is SELF's operand-dependent value. fields[0] specifies the base
834 register field and fields[1] specifies the offset register field. */
835const char *
836aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
837 const aarch64_opnd_info *info, aarch64_insn *code,
838 const aarch64_inst *inst ATTRIBUTE_UNUSED)
839{
840 insert_field (self->fields[0], code, info->addr.base_regno, 0);
841 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
842 return NULL;
843}
844
845/* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
846 <shift> is SELF's operand-dependent value. fields[0] specifies the
847 base register field, fields[1] specifies the offset register field and
848 fields[2] is a single-bit field that selects SXTW over UXTW. */
849const char *
850aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
851 const aarch64_opnd_info *info, aarch64_insn *code,
852 const aarch64_inst *inst ATTRIBUTE_UNUSED)
853{
854 insert_field (self->fields[0], code, info->addr.base_regno, 0);
855 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
856 if (info->shifter.kind == AARCH64_MOD_UXTW)
857 insert_field (self->fields[2], code, 0, 0);
858 else
859 insert_field (self->fields[2], code, 1, 0);
860 return NULL;
861}
862
863/* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
864 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
865 fields[0] specifies the base register field. */
866const char *
867aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
868 const aarch64_opnd_info *info, aarch64_insn *code,
869 const aarch64_inst *inst ATTRIBUTE_UNUSED)
870{
871 int factor = 1 << get_operand_specific_data (self);
872 insert_field (self->fields[0], code, info->addr.base_regno, 0);
873 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
874 return NULL;
875}
876
877/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
878 where <modifier> is fixed by the instruction and where <msz> is a
879 2-bit unsigned number. fields[0] specifies the base register field
880 and fields[1] specifies the offset register field. */
881static const char *
882aarch64_ext_sve_addr_zz (const aarch64_operand *self,
883 const aarch64_opnd_info *info, aarch64_insn *code)
884{
885 insert_field (self->fields[0], code, info->addr.base_regno, 0);
886 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
887 insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
888 return NULL;
889}
890
891/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
892 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
893 field and fields[1] specifies the offset register field. */
894const char *
895aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
896 const aarch64_opnd_info *info, aarch64_insn *code,
897 const aarch64_inst *inst ATTRIBUTE_UNUSED)
898{
899 return aarch64_ext_sve_addr_zz (self, info, code);
900}
901
902/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
903 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
904 field and fields[1] specifies the offset register field. */
905const char *
906aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
907 const aarch64_opnd_info *info,
908 aarch64_insn *code,
909 const aarch64_inst *inst ATTRIBUTE_UNUSED)
910{
911 return aarch64_ext_sve_addr_zz (self, info, code);
912}
913
914/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
915 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
916 field and fields[1] specifies the offset register field. */
917const char *
918aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
919 const aarch64_opnd_info *info,
920 aarch64_insn *code,
921 const aarch64_inst *inst ATTRIBUTE_UNUSED)
922{
923 return aarch64_ext_sve_addr_zz (self, info, code);
924}
925
e950b345
RS
926/* Encode an SVE ADD/SUB immediate. */
927const char *
928aarch64_ins_sve_aimm (const aarch64_operand *self,
929 const aarch64_opnd_info *info, aarch64_insn *code,
930 const aarch64_inst *inst ATTRIBUTE_UNUSED)
931{
932 if (info->shifter.amount == 8)
933 insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
934 else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
935 insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
936 else
937 insert_all_fields (self, code, info->imm.value & 0xff);
938 return NULL;
939}
940
/* Encode an SVE CPY/DUP immediate.  */
const char *
aarch64_ins_sve_asimm (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst)
{
  /* The signed CPY/DUP immediate uses the same byte-plus-shift-flag
     encoding as the ADD/SUB immediate, so delegate to that inserter.  */
  return aarch64_ins_sve_aimm (self, info, code, inst);
}
949
f11ad6bc
RS
950/* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
951 array specifies which field to use for Zn. MM is encoded in the
952 concatenation of imm5 and SVE_tszh, with imm5 being the less
953 significant part. */
954const char *
955aarch64_ins_sve_index (const aarch64_operand *self,
956 const aarch64_opnd_info *info, aarch64_insn *code,
957 const aarch64_inst *inst ATTRIBUTE_UNUSED)
958{
959 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
960 insert_field (self->fields[0], code, info->reglane.regno, 0);
961 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
962 2, FLD_imm5, FLD_SVE_tszh);
963 return NULL;
964}
965
/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
const char *
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst)
{
  /* The operand is an ordinary logical immediate; reuse the generic
     bitmask-immediate inserter.  */
  return aarch64_ins_limm (self, info, code, inst);
}
974
/* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
   to use for Zn.  */
const char *
aarch64_ins_sve_reglist (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* Only the first register is encoded; the list length is implied by
     the opcode.  */
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  return NULL;
}
985
2442d846
RS
986/* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
987 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
988 field. */
989const char *
990aarch64_ins_sve_scale (const aarch64_operand *self,
991 const aarch64_opnd_info *info, aarch64_insn *code,
992 const aarch64_inst *inst ATTRIBUTE_UNUSED)
993{
994 insert_all_fields (self, code, info->imm.value);
995 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
996 return NULL;
997}
998
e950b345
RS
999/* Encode an SVE shift left immediate. */
1000const char *
1001aarch64_ins_sve_shlimm (const aarch64_operand *self,
1002 const aarch64_opnd_info *info, aarch64_insn *code,
1003 const aarch64_inst *inst)
1004{
1005 const aarch64_opnd_info *prev_operand;
1006 unsigned int esize;
1007
1008 assert (info->idx > 0);
1009 prev_operand = &inst->operands[info->idx - 1];
1010 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1011 insert_all_fields (self, code, 8 * esize + info->imm.value);
1012 return NULL;
1013}
1014
1015/* Encode an SVE shift right immediate. */
1016const char *
1017aarch64_ins_sve_shrimm (const aarch64_operand *self,
1018 const aarch64_opnd_info *info, aarch64_insn *code,
1019 const aarch64_inst *inst)
1020{
1021 const aarch64_opnd_info *prev_operand;
1022 unsigned int esize;
1023
1024 assert (info->idx > 0);
1025 prev_operand = &inst->operands[info->idx - 1];
1026 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1027 insert_all_fields (self, code, 16 * esize - info->imm.value);
1028 return NULL;
1029}
1030
a06ea964
NC
1031/* Miscellaneous encoding functions. */
1032
1033/* Encode size[0], i.e. bit 22, for
1034 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1035
1036static void
1037encode_asimd_fcvt (aarch64_inst *inst)
1038{
1039 aarch64_insn value;
1040 aarch64_field field = {0, 0};
1041 enum aarch64_opnd_qualifier qualifier;
1042
1043 switch (inst->opcode->op)
1044 {
1045 case OP_FCVTN:
1046 case OP_FCVTN2:
1047 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1048 qualifier = inst->operands[1].qualifier;
1049 break;
1050 case OP_FCVTL:
1051 case OP_FCVTL2:
1052 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1053 qualifier = inst->operands[0].qualifier;
1054 break;
1055 default:
1056 assert (0);
1057 }
1058 assert (qualifier == AARCH64_OPND_QLF_V_4S
1059 || qualifier == AARCH64_OPND_QLF_V_2D);
1060 value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
1061 gen_sub_field (FLD_size, 0, 1, &field);
1062 insert_field_2 (&field, &inst->value, value, 0);
1063}
1064
1065/* Encode size[0], i.e. bit 22, for
1066 e.g. FCVTXN <Vb><d>, <Va><n>. */
1067
1068static void
1069encode_asisd_fcvtxn (aarch64_inst *inst)
1070{
1071 aarch64_insn val = 1;
1072 aarch64_field field = {0, 0};
1073 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1074 gen_sub_field (FLD_size, 0, 1, &field);
1075 insert_field_2 (&field, &inst->value, val, 0);
1076}
1077
1078/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1079static void
1080encode_fcvt (aarch64_inst *inst)
1081{
1082 aarch64_insn val;
1083 const aarch64_field field = {15, 2};
1084
1085 /* opc dstsize */
1086 switch (inst->operands[0].qualifier)
1087 {
1088 case AARCH64_OPND_QLF_S_S: val = 0; break;
1089 case AARCH64_OPND_QLF_S_D: val = 1; break;
1090 case AARCH64_OPND_QLF_S_H: val = 3; break;
1091 default: abort ();
1092 }
1093 insert_field_2 (&field, &inst->value, val, 0);
1094
1095 return;
1096}
1097
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  /* Dispatch on the opcode enum; opcodes without a special encoder
     fall through to the default and need no extra work.  */
  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    default: break;
    }
}
1121
1122/* Encode the 'size' and 'Q' field for e.g. SHADD. */
1123static void
1124encode_sizeq (aarch64_inst *inst)
1125{
1126 aarch64_insn sizeq;
1127 enum aarch64_field_kind kind;
1128 int idx;
1129
1130 /* Get the index of the operand whose information we are going to use
1131 to encode the size and Q fields.
1132 This is deduced from the possible valid qualifier lists. */
1133 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1134 DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
1135 aarch64_get_qualifier_name (inst->operands[idx].qualifier));
1136 sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
1137 /* Q */
1138 insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
1139 /* size */
1140 if (inst->opcode->iclass == asisdlse
1141 || inst->opcode->iclass == asisdlsep
1142 || inst->opcode->iclass == asisdlso
1143 || inst->opcode->iclass == asisdlsop)
1144 kind = FLD_vldst_size;
1145 else
1146 kind = FLD_size;
1147 insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
1148}
1149
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has the enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  /* 'sf' (and, when F_N is also set, 'N') from the register width of the
     selected operand: 1 for X/SP, 0 for W.  */
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  /* LSE atomic size bit, derived the same way as 'sf'.  */
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  /* Floating-point 'type' field from the scalar FP qualifier.  */
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: assert (0);
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  /* AdvSIMD scalar 'size' field.  */
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  /* 'Q' plus the one-hot imm5 encoding of the arrangement <T>.  */
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      /* NUM selects which imm5 bit is set, per the table above.  */
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  /* General-purpose register width encoded in 'Q'.  */
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be a integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Load-signed destination size in opc<0> (inverted standard value).  */
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
1275
1276/* Converters converting an alias opcode instruction to its real form. */
1277
/* ROR <Wd>, <Ws>, #<shift>
   is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  /* Make room for the duplicated <Ws>: move the immediate from slot 2
     to slot 3, then copy <Ws> from slot 1 into slot 2.  The order of
     the two copies matters.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}
1287
/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
   is equivalent to:
   USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
static void
convert_xtl_to_shll (aarch64_inst *inst)
{
  /* Synthesize the #0 shift operand; it takes the qualifier of the
     source register operand.  */
  inst->operands[2].qualifier = inst->operands[1].qualifier;
  inst->operands[2].imm.value = 0;
}
1297
a06ea964
NC
1298/* Convert
1299 LSR <Xd>, <Xn>, #<shift>
1300 to
1301 UBFM <Xd>, <Xn>, #<shift>, #63. */
1302static void
1303convert_sr_to_bfm (aarch64_inst *inst)
1304{
1305 inst->operands[3].imm.value =
1306 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1307}
1308
/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
     is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.
     Duplicate the source register into the third operand slot.  */
  copy_operand_info (inst, 2, 1);
}
1318
1319/* When <imms> >= <immr>, the instruction written:
1320 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1321 is equivalent to:
1322 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1323
1324static void
1325convert_bfx_to_bfm (aarch64_inst *inst)
1326{
1327 int64_t lsb, width;
1328
1329 /* Convert the operand. */
1330 lsb = inst->operands[2].imm.value;
1331 width = inst->operands[3].imm.value;
1332 inst->operands[2].imm.value = lsb;
1333 inst->operands[3].imm.value = lsb + width - 1;
1334}
1335
1336/* When <imms> < <immr>, the instruction written:
1337 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1338 is equivalent to:
1339 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1340
1341static void
1342convert_bfi_to_bfm (aarch64_inst *inst)
1343{
1344 int64_t lsb, width;
1345
1346 /* Convert the operand. */
1347 lsb = inst->operands[2].imm.value;
1348 width = inst->operands[3].imm.value;
1349 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1350 {
1351 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1352 inst->operands[3].imm.value = width - 1;
1353 }
1354 else
1355 {
1356 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1357 inst->operands[3].imm.value = width - 1;
1358 }
1359}
1360
d685192a
MW
1361/* The instruction written:
1362 BFC <Xd>, #<lsb>, #<width>
1363 is equivalent to:
1364 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1365
1366static void
1367convert_bfc_to_bfm (aarch64_inst *inst)
1368{
1369 int64_t lsb, width;
1370
1371 /* Insert XZR. */
1372 copy_operand_info (inst, 3, 2);
1373 copy_operand_info (inst, 2, 1);
1374 copy_operand_info (inst, 2, 0);
1375 inst->operands[1].reg.regno = 0x1f;
1376
1377 /* Convert the immedate operand. */
1378 lsb = inst->operands[2].imm.value;
1379 width = inst->operands[3].imm.value;
1380 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1381 {
1382 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1383 inst->operands[3].imm.value = width - 1;
1384 }
1385 else
1386 {
1387 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1388 inst->operands[3].imm.value = width - 1;
1389 }
1390}
1391
a06ea964
NC
1392/* The instruction written:
1393 LSL <Xd>, <Xn>, #<shift>
1394 is equivalent to:
1395 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1396
1397static void
1398convert_lsl_to_ubfm (aarch64_inst *inst)
1399{
1400 int64_t shift = inst->operands[2].imm.value;
1401
1402 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1403 {
1404 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1405 inst->operands[3].imm.value = 31 - shift;
1406 }
1407 else
1408 {
1409 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1410 inst->operands[3].imm.value = 63 - shift;
1411 }
1412}
1413
/* CINC <Wd>, <Wn>, <cond>
   is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */

static void
convert_to_csel (aarch64_inst *inst)
{
  /* Move the condition to slot 3, duplicate <Wn> into slot 2
     (order of the copies matters), then invert the condition.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1425
/* CSET <Wd>, <cond>
   is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  /* Move the condition to slot 3 and make slots 1 and 2 register
     operands shaped like <Wd>, then force both to WZR (31) and
     invert the condition.  */
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1440
1441/* MOV <Wd>, #<imm>
1442 is equivalent to:
1443 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1444
1445static void
1446convert_mov_to_movewide (aarch64_inst *inst)
1447{
1448 int is32;
1449 uint32_t shift_amount;
1450 uint64_t value;
1451
1452 switch (inst->opcode->op)
1453 {
1454 case OP_MOV_IMM_WIDE:
1455 value = inst->operands[1].imm.value;
1456 break;
1457 case OP_MOV_IMM_WIDEN:
1458 value = ~inst->operands[1].imm.value;
1459 break;
1460 default:
1461 assert (0);
1462 }
1463 inst->operands[1].type = AARCH64_OPND_HALF;
1464 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
062f38fa
RE
1465 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1466 /* The constraint check should have guaranteed this wouldn't happen. */
1467 assert (0);
a06ea964
NC
1468 value >>= shift_amount;
1469 value &= 0xffff;
1470 inst->operands[1].imm.value = value;
1471 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1472 inst->operands[1].shifter.amount = shift_amount;
1473}
1474
/* MOV <Wd>, #<imm>
   is equivalent to:
   ORR <Wd>, WZR, #<imm>.  */

static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  /* Move the immediate to slot 2 and turn slot 1 into WZR (31);
     clear 'skip' so the new register operand is encoded.  */
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
}
1486
1487/* Some alias opcodes are assembled by being converted to their real-form. */
1488
1489static void
1490convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1491{
1492 const aarch64_opcode *alias = inst->opcode;
1493
1494 if ((alias->flags & F_CONV) == 0)
1495 goto convert_to_real_return;
1496
1497 switch (alias->op)
1498 {
1499 case OP_ASR_IMM:
1500 case OP_LSR_IMM:
1501 convert_sr_to_bfm (inst);
1502 break;
1503 case OP_LSL_IMM:
1504 convert_lsl_to_ubfm (inst);
1505 break;
1506 case OP_CINC:
1507 case OP_CINV:
1508 case OP_CNEG:
1509 convert_to_csel (inst);
1510 break;
1511 case OP_CSET:
1512 case OP_CSETM:
1513 convert_cset_to_csinc (inst);
1514 break;
1515 case OP_UBFX:
1516 case OP_BFXIL:
1517 case OP_SBFX:
1518 convert_bfx_to_bfm (inst);
1519 break;
1520 case OP_SBFIZ:
1521 case OP_BFI:
1522 case OP_UBFIZ:
1523 convert_bfi_to_bfm (inst);
1524 break;
d685192a
MW
1525 case OP_BFC:
1526 convert_bfc_to_bfm (inst);
1527 break;
a06ea964
NC
1528 case OP_MOV_V:
1529 convert_mov_to_orr (inst);
1530 break;
1531 case OP_MOV_IMM_WIDE:
1532 case OP_MOV_IMM_WIDEN:
1533 convert_mov_to_movewide (inst);
1534 break;
1535 case OP_MOV_IMM_LOG:
1536 convert_mov_to_movebitmask (inst);
1537 break;
1538 case OP_ROR_IMM:
1539 convert_ror_to_extr (inst);
1540 break;
e30181a5
YZ
1541 case OP_SXTL:
1542 case OP_SXTL2:
1543 case OP_UXTL:
1544 case OP_UXTL2:
1545 convert_xtl_to_shll (inst);
1546 break;
a06ea964
NC
1547 default:
1548 break;
1549 }
1550
1551convert_to_real_return:
1552 aarch64_replace_opcode (inst, real);
1553}
1554
/* Encode *INST_ORI of the opcode code OPCODE.
   Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.
   Return 1 on success; 0 (with *MISMATCH_DETAIL filled in) if the operands
   fail their constraint check.  */

int
aarch64_opcode_encode (const aarch64_opcode *opcode,
		       const aarch64_inst *inst_ori, aarch64_insn *code,
		       aarch64_opnd_qualifier_t *qlf_seq,
		       aarch64_operand_error *mismatch_detail)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return 0;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
	*qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of real opcode
     and the encoding will be carried out using the rules for the aliased
     opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
		   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
      opcode = aliased;
    }

  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (info->skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd))
	aarch64_insert_operand (opnd, info, &inst->value, inst);
    }

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  *code = inst->value;

  return 1;
}