/* aarch64-asm.c -- AArch64 assembler support.
   Copyright (C) 2012-2016 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */

#include "sysdep.h"
#include <stdarg.h>
#include "libiberty.h"
#include "aarch64-asm.h"

/* Utilities.  */

/* The unnamed arguments consist of the number of fields and information about
   these fields where the VALUE will be inserted into CODE.  MASK can be zero
   or the base mask of the opcode.

   N.B. the fields are required to be in such an order that the least
   significant field for VALUE comes first, e.g. the <index> in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
   is in some cases encoded in H:L:M; the fields should then be passed in the
   order of M, L, H.  */

static inline void
insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
{
  uint32_t num;
  const aarch64_field *field;
  enum aarch64_field_kind kind;
  va_list va;

  va_start (va, mask);
  num = va_arg (va, uint32_t);
  assert (num <= 5);
  while (num--)
    {
      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      insert_field (kind, code, value, mask);
      value >>= field->width;
    }
  va_end (va);
}

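/* Illustrative example (not part of the original source): for a lane index
   encoded in H:L:M, a caller passes the fields least significant first, e.g.
     insert_fields (code, 5, 0, 3, FLD_M, FLD_L, FLD_H);
   places index bit 0 (1) in M, bit 1 (0) in L and bit 2 (1) in H.  */
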
/* Insert a raw field value VALUE into all fields in SELF->fields.
   The least significant bit goes in the final field.  */

static void
insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
                   aarch64_insn value)
{
  unsigned int i;
  enum aarch64_field_kind kind;

  for (i = ARRAY_SIZE (self->fields); i-- > 0; )
    if (self->fields[i] != FLD_NIL)
      {
        kind = self->fields[i];
        insert_field (kind, code, value, 0);
        value >>= fields[kind].width;
      }
}

/* Operand inserters.  */

/* Insert register number.  */
const char *
aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reg.regno, 0);
  return NULL;
}

/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
const char *
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code, const aarch64_inst *inst)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
          && inst->opcode->operands[0] == AARCH64_OPND_Ed)
        {
          /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
          assert (info->idx == 1);	/* Vn */
          aarch64_insn value = info->reglane.index << pos;
          insert_field (FLD_imm4, code, value, 0);
        }
      else
        {
          /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
             imm5<3:0>  <V>
             0000       RESERVED
             xxx1       B
             xx10       H
             x100       S
             1000       D  */
          aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
          insert_field (FLD_imm5, code, value, 0);
        }
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
         or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_H:
          /* H:L:M */
          insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
          break;
        case AARCH64_OPND_QLF_S_S:
          /* H:L */
          insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
          break;
        case AARCH64_OPND_QLF_S_D:
          /* H */
          insert_field (FLD_H, code, info->reglane.index, 0);
          break;
        default:
          assert (0);
        }
    }
  return NULL;
}

/* Insert regno and len field of a register list operand, e.g. Vn in TBL.  */
const char *
aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* R */
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  /* len */
  insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
  return NULL;
}

/* Insert Rt and opcode fields for a register list operand, e.g. Vt
   in AdvSIMD load/store instructions.  */
const char *
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst)
{
  aarch64_insn value = 0;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* opcode */
  switch (num)
    {
    case 1:
      switch (info->reglist.num_regs)
        {
        case 1: value = 0x7; break;
        case 2: value = 0xa; break;
        case 3: value = 0x6; break;
        case 4: value = 0x2; break;
        default: assert (0);
        }
      break;
    case 2:
      value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
      break;
    case 3:
      value = 0x4;
      break;
    case 4:
      value = 0x0;
      break;
    default:
      assert (0);
    }
  insert_field (FLD_opcode, code, value, 0);

  return NULL;
}

/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
   single structure to all lanes instructions.  */
const char *
aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst)
{
  aarch64_insn value;
  /* The opcode dependent area stores the number of elements in
     each structure to be loaded/stored.  */
  int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* S */
  value = (aarch64_insn) 0;
  if (is_ld1r && info->reglist.num_regs == 2)
    /* OP_LD1R does not have an alternating variant, but has a "two
       consecutive" variant instead.  */
    value = (aarch64_insn) 1;
  insert_field (FLD_S, code, value, 0);

  return NULL;
}

/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand e.g. Vt in AdvSIMD load/store single element instructions.  */
const char *
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                           const aarch64_opnd_info *info, aarch64_insn *code,
                           const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q".  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      assert (0);
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return NULL;
}

/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
   SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.  */
const char *
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code, const aarch64_inst *inst)
{
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
         immh   Q       <T>
         0000   x       SEE AdvSIMD modified immediate
         0001   0       8B
         0001   1       16B
         001x   0       4H
         001x   1       8H
         01xx   0       2S
         01xx   1       4S
         1xxx   0       RESERVED
         1xxx   1       2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
          || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh     <shift>
       0000     SEE AdvSIMD modified immediate
       0001     (16-UInt(immh:immb))
       001x     (32-UInt(immh:immb))
       01xx     (64-UInt(immh:immb))
       1xxx     (128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh:immb
       immh     <shift>
       0000     SEE AdvSIMD modified immediate
       0001     (UInt(immh:immb)-8)
       001x     (UInt(immh:immb)-16)
       01xx     (UInt(immh:immb)-32)
       1xxx     (UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return NULL;
}

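/* Illustrative example (not part of the original source): for the scalar form
   SSHR D0, D1, #3 the S_D qualifier gives val == 3, so the right-shift case
   computes imm = (16 << 3) - 3 = 125 = 0b1111101, i.e. immh = 0b1111 and
   immb = 0b101, matching the "1xxx  (128-UInt(immh:immb))" row above.  */
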
/* Insert fields for e.g. the immediate operands in
   BFM <Wd>, <Wn>, #<immr>, #<imms>.  */
const char *
aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
                 aarch64_insn *code,
                 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int64_t imm;

  imm = info->imm.value;
  if (operand_need_shift_by_two (self))
    imm >>= 2;
  insert_all_fields (self, code, imm);
  return NULL;
}

/* Insert immediate and its shift amount for e.g. the last operand in
   MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
const char *
aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
                      aarch64_insn *code, const aarch64_inst *inst)
{
  /* imm16 */
  aarch64_ins_imm (self, info, code, inst);
  /* hw */
  insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
  return NULL;
}

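/* Illustrative example (not part of the original source): MOVZ X0, #0xbeef,
   LSL #16 stores 0xbeef in imm16 and 16 >> 4 == 1 in the hw field.  */
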
/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
   MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
const char *
aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
                                  const aarch64_opnd_info *info,
                                  aarch64_insn *code,
                                  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  uint64_t imm = info->imm.value;
  enum aarch64_modifier_kind kind = info->shifter.kind;
  int amount = info->shifter.amount;
  aarch64_field field = {0, 0};

  /* a:b:c:d:e:f:g:h */
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    {
      /* Either MOVI <Dd>, #<imm>
         or MOVI <Vd>.2D, #<imm>.
         <imm> is a 64-bit immediate
         "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
         encoded in "a:b:c:d:e:f:g:h".  */
      imm = aarch64_shrink_expanded_imm8 (imm);
      assert ((int)imm >= 0);
    }
  insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);

  if (kind == AARCH64_MOD_NONE)
    return NULL;

  /* shift amount partially in cmode */
  assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
  if (kind == AARCH64_MOD_LSL)
    {
      /* AARCH64_MOD_LSL: shift zeros.  */
      int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
      assert (esize == 4 || esize == 2 || esize == 1);
      /* For 8-bit move immediate, the optional LSL #0 does not require
         encoding.  */
      if (esize == 1)
        return NULL;
      amount >>= 3;
      if (esize == 4)
        gen_sub_field (FLD_cmode, 1, 2, &field);	/* per word */
      else
        gen_sub_field (FLD_cmode, 1, 1, &field);	/* per halfword */
    }
  else
    {
      /* AARCH64_MOD_MSL: shift ones.  */
      amount >>= 4;
      gen_sub_field (FLD_cmode, 0, 1, &field);		/* per word */
    }
  insert_field_2 (&field, code, amount, 0);

  return NULL;
}

/* Insert fields for an 8-bit floating-point immediate.  */
const char *
aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  return NULL;
}

/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
   e.g. SCVTF <Dd>, <Wn>, #<fbits>.  */
const char *
aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, 64 - info->imm.value, 0);
  return NULL;
}

/* Insert arithmetic immediate for e.g. the last operand in
   SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
const char *
aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* shift */
  aarch64_insn value = info->shifter.amount ? 1 : 0;
  insert_field (self->fields[0], code, value, 0);
  /* imm12 (unsigned) */
  insert_field (self->fields[1], code, info->imm.value, 0);
  return NULL;
}

/* Common routine shared by aarch64_ins{,_inv}_limm.  INVERT_P says whether
   the operand should be inverted before encoding.  */
static const char *
aarch64_ins_limm_1 (const aarch64_operand *self,
                    const aarch64_opnd_info *info, aarch64_insn *code,
                    const aarch64_inst *inst, bfd_boolean invert_p)
{
  aarch64_insn value;
  uint64_t imm = info->imm.value;
  int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);

  if (invert_p)
    imm = ~imm;
  if (aarch64_logical_immediate_p (imm, esize, &value) == FALSE)
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);

  insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
                 self->fields[0]);
  return NULL;
}

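/* Illustrative example (not part of the original source): for the 64-bit
   immediate #0xff (a run of eight ones starting at bit 0) the expected
   bitmask encoding is N = 1, immr = 0, imms = 0b000111; insert_fields then
   spreads the packed value across self->fields[2], self->fields[1] and
   self->fields[0], least significant bits first.  */
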
/* Insert logical/bitmask immediate for e.g. the last operand in
   ORR <Wd|WSP>, <Wn>, #<imm>.  */
const char *
aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst)
{
  return aarch64_ins_limm_1 (self, info, code, inst,
                             inst->opcode->op == OP_BIC);
}

/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.).  */
const char *
aarch64_ins_inv_limm (const aarch64_operand *self,
                      const aarch64_opnd_info *info, aarch64_insn *code,
                      const aarch64_inst *inst)
{
  return aarch64_ins_limm_1 (self, info, code, inst, TRUE);
}

/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
const char *
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
                aarch64_insn *code, const aarch64_inst *inst)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size */
      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_Q: value = 2; break;
        default: assert (0);
        }
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }

  return NULL;
}

/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
const char *
aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  return NULL;
}

/* Encode the address operand for e.g.
   STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
const char *
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
       S        <amount>
       0        [absent]
       1        #0
       Must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return NULL;
}

/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.  */
const char *
aarch64_ins_addr_simm (const aarch64_operand *self,
                       const aarch64_opnd_info *info,
                       aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7)
    /* scaled immediate in ld/st pair instructions.  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post- index */
  if (info->addr.writeback)
    {
      assert (inst->opcode->iclass != ldst_unscaled
              && inst->opcode->iclass != ldstnapair_offs
              && inst->opcode->iclass != ldstpair_off
              && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
        insert_field (self->fields[1], code, 1, 0);
    }

  return NULL;
}

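/* Illustrative example (not part of the original source): STP X0, X1,
   [SP, #16]! uses the imm7 form with an 8-byte element size, so the stored
   field value is 16 >> 3 == 2 and the pre-index bit is set.  */
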
/* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}].  */
const char *
aarch64_ins_addr_simm10 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* simm10 */
  imm = info->addr.offset.imm >> 3;
  insert_field (self->fields[1], code, imm >> 9, 0);
  insert_field (self->fields[2], code, imm, 0);
  /* writeback */
  if (info->addr.writeback)
    {
      assert (info->addr.preind == 1 && info->addr.postind == 0);
      insert_field (self->fields[3], code, 1, 0);
    }
  return NULL;
}

/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}].  */
const char *
aarch64_ins_addr_uimm12 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* uimm12 */
  insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
  return NULL;
}

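/* Illustrative example (not part of the original source): LDR W0, [X1, #8]
   has a 4-byte element size, so shift == 2 and the uimm12 field holds
   8 >> 2 == 2.  */
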
/* Encode the address operand for e.g.
   LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
const char *
aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm | #<amount> */
  if (info->addr.offset.is_reg)
    insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  else
    insert_field (FLD_Rm, code, 0x1f, 0);
  return NULL;
}

/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
const char *
aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info, aarch64_insn *code,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* cond */
  insert_field (FLD_cond, code, info->cond->value, 0);
  return NULL;
}

/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
const char *
aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
                    const aarch64_opnd_info *info, aarch64_insn *code,
                    const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* op0:op1:CRn:CRm:op2 */
  insert_fields (code, info->sysreg, inst->opcode->mask, 5,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
  return NULL;
}

/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
const char *
aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* op1:op2 */
  insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
                 FLD_op2, FLD_op1);
  return NULL;
}

/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
const char *
aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* op1:CRn:CRm:op2 */
  insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
  return NULL;
}

/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */

const char *
aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
                     const aarch64_opnd_info *info, aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* CRm */
  insert_field (FLD_CRm, code, info->barrier->value, 0);
  return NULL;
}

/* Encode the prefetch operation option operand for e.g.
   PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */

const char *
aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
                   const aarch64_opnd_info *info, aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* prfop in Rt */
  insert_field (FLD_Rt, code, info->prfop->value, 0);
  return NULL;
}

/* Encode the hint number for instructions that alias HINT but take an
   operand.  */

const char *
aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info, aarch64_insn *code,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* CRm:op2.  */
  insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
  return NULL;
}

/* Encode the extended register operand for e.g.
   STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
const char *
aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  enum aarch64_modifier_kind kind;

  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* option */
  kind = info->shifter.kind;
  if (kind == AARCH64_MOD_LSL)
    kind = info->qualifier == AARCH64_OPND_QLF_W
      ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* imm3 */
  insert_field (FLD_imm3, code, info->shifter.amount, 0);

  return NULL;
}

/* Encode the shifted register operand for e.g.
   SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
const char *
aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* shift */
  insert_field (FLD_shift, code,
                aarch64_get_operand_modifier_value (info->shifter.kind), 0);
  /* imm6 */
  insert_field (FLD_imm6, code, info->shifter.amount, 0);

  return NULL;
}

/* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
   where <simm4> is a 4-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm4> is encoded in the SVE_imm4 field.  */
const char *
aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
  return NULL;
}

/* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
   where <simm6> is a 6-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm6> is encoded in the SVE_imm6 field.  */
const char *
aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return NULL;
}

/* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
   where <simm9> is a 9-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm9> is encoded in the concatenation of the SVE_imm6
   and imm3 fields, with imm3 being the less-significant part.  */
const char *
aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_fields (code, info->addr.offset.imm / factor, 0,
                 2, FLD_imm3, FLD_SVE_imm6);
  return NULL;
}

/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
   is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
const char *
aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return NULL;
}

/* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
   is SELF's operand-dependent value.  fields[0] specifies the base
   register field and fields[1] specifies the offset register field.  */
const char *
aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  return NULL;
}

/* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
   <shift> is SELF's operand-dependent value.  fields[0] specifies the
   base register field, fields[1] specifies the offset register field and
   fields[2] is a single-bit field that selects SXTW over UXTW.  */
const char *
aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  if (info->shifter.kind == AARCH64_MOD_UXTW)
    insert_field (self->fields[2], code, 0, 0);
  else
    insert_field (self->fields[2], code, 1, 0);
  return NULL;
}

/* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
   5-bit unsigned number and where <shift> is SELF's operand-dependent value.
   fields[0] specifies the base register field.  */
const char *
aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
  return NULL;
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is fixed by the instruction and where <msz> is a
   2-bit unsigned number.  fields[0] specifies the base register field
   and fields[1] specifies the offset register field.  */
static const char *
aarch64_ext_sve_addr_zz (const aarch64_operand *self,
                         const aarch64_opnd_info *info, aarch64_insn *code)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
  return NULL;
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
const char *
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  return aarch64_ext_sve_addr_zz (self, info, code);
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
const char *
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
                              const aarch64_opnd_info *info,
                              aarch64_insn *code,
                              const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  return aarch64_ext_sve_addr_zz (self, info, code);
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
const char *
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
                              const aarch64_opnd_info *info,
                              aarch64_insn *code,
                              const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  return aarch64_ext_sve_addr_zz (self, info, code);
}

/* Encode an SVE ADD/SUB immediate.  */
const char *
aarch64_ins_sve_aimm (const aarch64_operand *self,
                      const aarch64_opnd_info *info, aarch64_insn *code,
                      const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  if (info->shifter.amount == 8)
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    insert_all_fields (self, code, info->imm.value & 0xff);
  return NULL;
}

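/* Illustrative example (not part of the original source): an immediate
   written as #3, LSL #8 takes the first branch and is encoded as
   (3 & 0xff) | 256, i.e. the shift flag bit (bit 8) plus the 8-bit payload;
   an unshifted multiple of 256 such as #1024 takes the second branch and
   becomes ((1024 / 256) & 0xff) | 256.  */
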
/* Encode an SVE CPY/DUP immediate.  */
const char *
aarch64_ins_sve_asimm (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst)
{
  return aarch64_ins_sve_aimm (self, info, code, inst);
}

/* Encode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
   array specifies which field to use for Zn.  MM is encoded in the
   concatenation of imm5 and SVE_tszh, with imm5 being the less
   significant part.  */
const char *
aarch64_ins_sve_index (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
  insert_field (self->fields[0], code, info->reglane.regno, 0);
  insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
                 2, FLD_imm5, FLD_SVE_tszh);
  return NULL;
}

/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
const char *
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst)
{
  return aarch64_ins_limm (self, info, code, inst);
}

/* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
   to use for Zn.  */
const char *
aarch64_ins_sve_reglist (const aarch64_operand *self,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  return NULL;
}

/* Encode <pattern>{, MUL #<amount>}.  The fields array specifies which
   fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
   field.  */
const char *
aarch64_ins_sve_scale (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
  return NULL;
}

/* Encode an SVE shift left immediate.  */
const char *
aarch64_ins_sve_shlimm (const aarch64_operand *self,
                        const aarch64_opnd_info *info, aarch64_insn *code,
                        const aarch64_inst *inst)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  assert (info->idx > 0);
  prev_operand = &inst->operands[info->idx - 1];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  insert_all_fields (self, code, 8 * esize + info->imm.value);
  return NULL;
}

/* Encode an SVE shift right immediate.  */
const char *
aarch64_ins_sve_shrimm (const aarch64_operand *self,
                        const aarch64_opnd_info *info, aarch64_insn *code,
                        const aarch64_inst *inst)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  assert (info->idx > 0);
  prev_operand = &inst->operands[info->idx - 1];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  insert_all_fields (self, code, 16 * esize - info->imm.value);
  return NULL;
}

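/* Illustrative example (not part of the original source): with byte elements
   (esize == 1), a left shift of #3 encodes 8 * 1 + 3 == 11, while a right
   shift of #3 encodes 16 * 1 - 3 == 13; the element size is thus folded into
   the same immediate field as the shift amount.  */
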
/* Encode a single-bit immediate that selects between #0.5 and #1.0.
   The fields array specifies which field to use.  */
const char *
aarch64_ins_sve_float_half_one (const aarch64_operand *self,
                                const aarch64_opnd_info *info,
                                aarch64_insn *code,
                                const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  if (info->imm.value == 0x3f000000)
    insert_field (self->fields[0], code, 0, 0);
  else
    insert_field (self->fields[0], code, 1, 0);
  return NULL;
}

/* Encode a single-bit immediate that selects between #0.5 and #2.0.
   The fields array specifies which field to use.  */
const char *
aarch64_ins_sve_float_half_two (const aarch64_operand *self,
                                const aarch64_opnd_info *info,
                                aarch64_insn *code,
                                const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  if (info->imm.value == 0x3f000000)
    insert_field (self->fields[0], code, 0, 0);
  else
    insert_field (self->fields[0], code, 1, 0);
  return NULL;
}

/* Encode a single-bit immediate that selects between #0.0 and #1.0.
   The fields array specifies which field to use.  */
const char *
aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
                                const aarch64_opnd_info *info,
                                aarch64_insn *code,
                                const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  if (info->imm.value == 0)
    insert_field (self->fields[0], code, 0, 0);
  else
    insert_field (self->fields[0], code, 1, 0);
  return NULL;
}

/* Miscellaneous encoding functions.  */

/* Encode size[0], i.e. bit 22, for
   e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier;

  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      assert (0);
    }
  assert (qualifier == AARCH64_OPND_QLF_V_4S
          || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}

/* Encode size[0], i.e. bit 22, for
   e.g. FCVTXN <Vb><d>, <Va><n>.  */

static void
encode_asisd_fcvtxn (aarch64_inst *inst)
{
  aarch64_insn val = 1;
  aarch64_field field = {0, 0};
  assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, val, 0);
}

/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
static void
encode_fcvt (aarch64_inst *inst)
{
  aarch64_insn val;
  const aarch64_field field = {15, 2};

  /* opc dstsize */
  switch (inst->operands[0].qualifier)
    {
    case AARCH64_OPND_QLF_S_S: val = 0; break;
    case AARCH64_OPND_QLF_S_D: val = 1; break;
    case AARCH64_OPND_QLF_S_H: val = 3; break;
    default: abort ();
    }
  insert_field_2 (&field, &inst->value, val, 0);

  return;
}

/* Return the index in qualifiers_list that INST is using.  Should only
   be called once the qualifiers are known to be valid.  */

static int
aarch64_get_variant (struct aarch64_inst *inst)
{
  int i, nops, variant;

  nops = aarch64_num_of_operands (inst->opcode);
  for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
    {
      for (i = 0; i < nops; ++i)
        if (inst->opcode->qualifiers_list[variant][i]
            != inst->operands[i].qualifier)
          break;
      if (i == nops)
        return variant;
    }
  abort ();
}

/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_field (FLD_SVE_tsz, &inst->value,
                    1 << aarch64_get_variant (inst), 0);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}

/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
               aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}

/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
               || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
              ? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
        insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
               || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
              ? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
        {
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_H: value = 3; break;
        default: assert (0);
        }
      insert_field (FLD_type, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
              && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_SIMD_REG
              && qualifier >= AARCH64_OPND_QLF_V_8B
              && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>       q       <t>
         0000            x       reserved
         xxx1            0       8b
         xxx1            1       16b
         xx10            0       4h
         xx10            1       8h
         x100            0       2s
         x100            1       4s
         1000            0       reserved
         1000            1       2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
         STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
        /* Otherwise use the result operand, which has to be an integer
           register.  */
        idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
              == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
                    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
                      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}

/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  switch (inst->opcode->iclass)
    {
    case sve_cpy:
      insert_fields (&inst->value, aarch64_get_variant (inst),
                     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
      /* For indices and shift amounts, the variant is encoded as
         part of the immediate.  */
      break;

    case sve_limm:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
         and depend on the immediate.  They don't have a separate
         encoding.  */
      break;

    case sve_misc:
      /* sve_misc instructions have only a single variant.  */
      break;

    case sve_movprfx:
      insert_fields (&inst->value, aarch64_get_variant (inst),
                     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_sd:
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    default:
      break;
    }
}

/* Converters converting an alias opcode instruction to its real form.  */

/* ROR <Wd>, <Ws>, #<shift>
   is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}

/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
   is equivalent to:
   USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
static void
convert_xtl_to_shll (aarch64_inst *inst)
{
  inst->operands[2].qualifier = inst->operands[1].qualifier;
  inst->operands[2].imm.value = 0;
}

/* Convert
     LSR <Xd>, <Xn>, #<shift>
   to
     UBFM <Xd>, <Xn>, #<shift>, #63.  */
static void
convert_sr_to_bfm (aarch64_inst *inst)
{
  inst->operands[3].imm.value =
    inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
}

/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
     is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  copy_operand_info (inst, 2, 1);
}

/* When <imms> >= <immr>, the instruction written:
     SBFX <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */

static void
convert_bfx_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  inst->operands[2].imm.value = lsb;
  inst->operands[3].imm.value = lsb + width - 1;
}

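/* Illustrative example (not part of the original source): SBFX X0, X1, #8, #4
   becomes SBFM X0, X1, #8, #11, since the BFM form takes the index of the
   last extracted bit (8 + 4 - 1) rather than the field width.  */
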
/* When <imms> < <immr>, the instruction written:
     SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */

static void
convert_bfi_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
    }
  else
    {
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
    }
}

/* The instruction written:
     BFC <Xd>, #<lsb>, #<width>
   is equivalent to:
     BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1).  */

static void
convert_bfc_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Insert XZR.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  copy_operand_info (inst, 0, 0);
  inst->operands[1].reg.regno = 0x1f;

  /* Convert the immediate operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
    }
  else
    {
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
    }
}

/* The instruction written:
     LSL <Xd>, <Xn>, #<shift>
   is equivalent to:
     UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */

static void
convert_lsl_to_ubfm (aarch64_inst *inst)
{
  int64_t shift = inst->operands[2].imm.value;

  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - shift) & 0x1f;
      inst->operands[3].imm.value = 31 - shift;
    }
  else
    {
      inst->operands[2].imm.value = (64 - shift) & 0x3f;
      inst->operands[3].imm.value = 63 - shift;
    }
}

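/* Illustrative example (not part of the original source): LSL X0, X1, #3
   becomes UBFM X0, X1, #61, #60, i.e. immr = (64 - 3) & 0x3f and
   imms = 63 - 3.  */
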
/* CINC <Wd>, <Wn>, <cond>
   is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */

static void
convert_to_csel (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}

/* CSET <Wd>, <cond>
   is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}

/* MOV <Wd>, #<imm>
   is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>.  */

static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value;

  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      value = ~inst->operands[1].imm.value;
      break;
    default:
      assert (0);
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
}

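/* Illustrative example (not part of the original source): MOV X0, #0x12340000
   is rewritten as MOVZ X0, #0x1234, LSL #16; aarch64_wide_constant_p picks
   shift_amount == 16 and the remaining 16-bit payload is 0x1234.  */
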
/* MOV <Wd>, #<imm>
   is equivalent to:
   ORR <Wd>, WZR, #<imm>.  */

static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
}

/* Some alias opcodes are assembled by being converted to their real-form.  */

static void
convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
{
  const aarch64_opcode *alias = inst->opcode;

  if ((alias->flags & F_CONV) == 0)
    goto convert_to_real_return;

  switch (alias->op)
    {
    case OP_ASR_IMM:
    case OP_LSR_IMM:
      convert_sr_to_bfm (inst);
      break;
    case OP_LSL_IMM:
      convert_lsl_to_ubfm (inst);
      break;
    case OP_CINC:
    case OP_CINV:
    case OP_CNEG:
      convert_to_csel (inst);
      break;
    case OP_CSET:
    case OP_CSETM:
      convert_cset_to_csinc (inst);
      break;
    case OP_UBFX:
    case OP_BFXIL:
    case OP_SBFX:
      convert_bfx_to_bfm (inst);
      break;
    case OP_SBFIZ:
    case OP_BFI:
    case OP_UBFIZ:
      convert_bfi_to_bfm (inst);
      break;
    case OP_BFC:
      convert_bfc_to_bfm (inst);
      break;
    case OP_MOV_V:
      convert_mov_to_orr (inst);
      break;
    case OP_MOV_IMM_WIDE:
    case OP_MOV_IMM_WIDEN:
      convert_mov_to_movewide (inst);
      break;
    case OP_MOV_IMM_LOG:
      convert_mov_to_movebitmask (inst);
      break;
    case OP_ROR_IMM:
      convert_ror_to_extr (inst);
      break;
    case OP_SXTL:
    case OP_SXTL2:
    case OP_UXTL:
    case OP_UXTL2:
      convert_xtl_to_shll (inst);
      break;
    default:
      break;
    }

convert_to_real_return:
  aarch64_replace_opcode (inst, real);
}

/* Encode *INST_ORI of the opcode OPCODE.
   Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.  */

int
aarch64_opcode_encode (const aarch64_opcode *opcode,
                       const aarch64_inst *inst_ori, aarch64_insn *code,
                       aarch64_opnd_qualifier_t *qlf_seq,
                       aarch64_operand_error *mismatch_detail)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return 0;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
        *qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of real opcode
     and the encoding will be carried out using the rules for the aliased
     opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
                   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
      opcode = aliased;
    }

  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
        break;
      if (info->skip)
        {
          DEBUG_TRACE ("skip the incomplete operand %d", i);
          continue;
        }
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd))
        aarch64_insert_operand (opnd, info, &inst->value, inst);
    }

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

  /* Possibly use the instruction class to encode the chosen qualifier
     variant.  */
  aarch64_encode_variant_using_iclass (inst);

encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  *code = inst->value;

  return 1;
}