]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
aarch64: Add an error code for out-of-range registers
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 /* Types of processor to assemble for. */
42 #ifndef CPU_DEFAULT
43 #define CPU_DEFAULT AARCH64_ARCH_V8
44 #endif
45
46 #define streq(a, b) (strcmp (a, b) == 0)
47
48 #define END_OF_INSN '\0'
49
50 static aarch64_feature_set cpu_variant;
51
52 /* Variables that we set while parsing command-line options. Once all
53 options have been read we re-process these values to set the real
54 assembly flags. */
55 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
56 static const aarch64_feature_set *march_cpu_opt = NULL;
57
58 /* Constants for known architecture features. */
59 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
60
61 /* Currently active instruction sequence. */
62 static aarch64_instr_sequence *insn_sequence = NULL;
63
64 #ifdef OBJ_ELF
65 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
66 static symbolS *GOT_symbol;
67 #endif
68
69 /* Which ABI to use. */
70 enum aarch64_abi_type
71 {
72 AARCH64_ABI_NONE = 0,
73 AARCH64_ABI_LP64 = 1,
74 AARCH64_ABI_ILP32 = 2,
75 AARCH64_ABI_LLP64 = 3
76 };
77
78 unsigned int aarch64_sframe_cfa_sp_reg;
79 /* The other CFA base register for SFrame stack trace info. */
80 unsigned int aarch64_sframe_cfa_fp_reg;
81 unsigned int aarch64_sframe_cfa_ra_reg;
82
83 #ifndef DEFAULT_ARCH
84 #define DEFAULT_ARCH "aarch64"
85 #endif
86
87 #ifdef OBJ_ELF
88 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
89 static const char *default_arch = DEFAULT_ARCH;
90 #endif
91
92 /* AArch64 ABI for the output file. */
93 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
94
95 /* When non-zero, program to a 32-bit model, in which the C data types
96 int, long and all pointer types are 32-bit objects (ILP32); or to a
97 64-bit model, in which the C int type is 32-bits but the C long type
98 and all pointer types are 64-bit objects (LP64). */
99 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
100
101 /* When non zero, C types int and long are 32 bit,
102 pointers, however are 64 bit */
103 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
104
105 enum vector_el_type
106 {
107 NT_invtype = -1,
108 NT_b,
109 NT_h,
110 NT_s,
111 NT_d,
112 NT_q,
113 NT_zero,
114 NT_merge
115 };
116
117 /* Bits for DEFINED field in vector_type_el. */
118 #define NTA_HASTYPE 1
119 #define NTA_HASINDEX 2
120 #define NTA_HASVARWIDTH 4
121
/* A parsed vector arrangement or element reference, e.g. the ".4s" in
   "v0.4s" or the ".d[1]" in "v0.d[1]".  DEFINED is a mask of the
   NTA_HAS* flags above saying which of the other fields are valid.  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type (NT_b .. NT_q etc.).  */
  unsigned char defined;	/* Mask of NTA_HAS* flags.  */
  unsigned element_size;	/* Element size in bits.  */
  unsigned width;		/* Number of elements; 0 if unspecified.  */
  int64_t index;		/* Element index, if NTA_HASINDEX.  */
};
130
131 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
132
/* Relocation/fixup information collected while parsing one instruction.  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code.  */
  expressionS exp;			/* Expression to be resolved.  */
  int pc_rel;				/* Nonzero if PC-relative.  */
  enum aarch64_opnd opnd;		/* Operand the fixup applies to.  */
  uint32_t flags;			/* FIXUP_F_* flags.  */
  /* NOTE(review): presumably means the fixup must be re-encoded via
     libopcodes rather than patched directly — confirm at use sites.  */
  unsigned need_libopcodes_p : 1;
};
142
143 struct aarch64_instruction
144 {
145 /* libopcodes structure for instruction intermediate representation. */
146 aarch64_inst base;
147 /* Record assembly errors found during the parsing. */
148 aarch64_operand_error parsing_error;
149 /* The condition that appears in the assembly line. */
150 int cond;
151 /* Relocation information (including the GAS internal fixup). */
152 struct reloc reloc;
153 /* Need to generate an immediate in the literal pool. */
154 unsigned gen_lit_pool : 1;
155 };
156
157 typedef struct aarch64_instruction aarch64_instruction;
158
159 static aarch64_instruction inst;
160
161 static bool parse_operands (char *, const aarch64_opcode *);
162 static bool programmer_friendly_fixup (aarch64_instruction *);
163
164 /* If an AARCH64_OPDE_SYNTAX_ERROR has no error string, its first three
165 data fields contain the following information:
166
167 data[0].i:
168 A mask of register types that would have been acceptable as bare
169 operands, outside of a register list. In addition, SEF_DEFAULT_ERROR
170 is set if a general parsing error occurred for an operand (that is,
171 an error not related to registers, and having no error string).
172
173 data[1].i:
174 A mask of register types that would have been acceptable inside
175 a register list. In addition, SEF_IN_REGLIST is set if the
176 operand contained a '{' and if we got to the point of trying
177 to parse a register inside a list.
178
179 data[2].i:
180 The mask associated with the register that was actually seen, or 0
181 if none. A nonzero value describes a register inside a register
182 list if data[1].i & SEF_IN_REGLIST, otherwise it describes a bare
183 register.
184
185 The idea is that stringless errors from multiple opcode templates can
186 be ORed together to give a summary of the available alternatives. */
187 #define SEF_DEFAULT_ERROR (1U << 31)
188 #define SEF_IN_REGLIST (1U << 31)
189
190 /* Diagnostics inline function utilities.
191
192 These are lightweight utilities which should only be called by parse_operands
193 and other parsers. GAS processes each assembly line by parsing it against
194 instruction template(s), in the case of multiple templates (for the same
195 mnemonic name), those templates are tried one by one until one succeeds or
196 all fail. An assembly line may fail a few templates before being
197 successfully parsed; an error saved here in most cases is not a user error
198 but an error indicating the current template is not the right template.
199 Therefore it is very important that errors can be saved at a low cost during
200 the parsing; we don't want to slow down the whole parsing by recording
201 non-user errors in detail.
202
203 Remember that the objective is to help GAS pick up the most appropriate
204 error message in the case of multiple templates, e.g. FMOV which has 8
205 templates. */
206
207 static inline void
208 clear_error (void)
209 {
210 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
211 inst.parsing_error.kind = AARCH64_OPDE_NIL;
212 }
213
214 static inline bool
215 error_p (void)
216 {
217 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
218 }
219
220 static inline void
221 set_error (enum aarch64_operand_error_kind kind, const char *error)
222 {
223 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
224 inst.parsing_error.index = -1;
225 inst.parsing_error.kind = kind;
226 inst.parsing_error.error = error;
227 }
228
/* Record ERROR as a recoverable error.  NOTE(review): "recoverable"
   presumably means parsing may continue against the same template —
   confirm against the AARCH64_OPDE_RECOVERABLE handling.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
234
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
  /* Mark this as a generic parsing failure, not tied to a particular
     register type; see the comment above SEF_DEFAULT_ERROR.  */
  inst.parsing_error.data[0].i = SEF_DEFAULT_ERROR;
}
243
/* Record a stringless syntax error.  FLAGS is a mask of acceptable
   register types and/or SEF_DEFAULT_ERROR, stored in data[0] as
   described in the comment before SEF_DEFAULT_ERROR.  */
static inline void
set_expected_error (unsigned int flags)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
  inst.parsing_error.data[0].i = flags;
}
250
/* Record ERROR as a syntax error, replacing any earlier saved error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
256
257 static inline void
258 set_first_syntax_error (const char *error)
259 {
260 if (! error_p ())
261 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
262 }
263
/* Record ERROR as a fatal syntax error.  NOTE(review): the FATAL kind
   presumably suppresses trying further templates — confirm against the
   error-reporting code.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
269 \f
270 /* Return value for certain parsers when the parsing fails; those parsers
271 return the information of the parsed result, e.g. register number, on
272 success. */
273 #define PARSE_FAIL -1
274
275 /* This is an invalid condition code that means no conditional field is
276 present. */
277 #define COND_ALWAYS 0x10
278
279 typedef struct
280 {
281 const char *template;
282 uint32_t value;
283 } asm_nzcv;
284
285 struct reloc_entry
286 {
287 char *name;
288 bfd_reloc_code_real_type reloc;
289 };
290
291 /* Macros to define the register types and masks for the purpose
292 of parsing. */
293
294 #undef AARCH64_REG_TYPES
295 #define AARCH64_REG_TYPES \
296 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
297 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
298 BASIC_REG_TYPE(SP_32) /* wsp */ \
299 BASIC_REG_TYPE(SP_64) /* sp */ \
300 BASIC_REG_TYPE(Z_32) /* wzr */ \
301 BASIC_REG_TYPE(Z_64) /* xzr */ \
302 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
303 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
304 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
305 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
306 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
307 BASIC_REG_TYPE(VN) /* v[0-31] */ \
308 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
309 BASIC_REG_TYPE(PN) /* p[0-15] */ \
310 BASIC_REG_TYPE(ZA) /* za */ \
311 BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
312 BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
313 BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
314 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
315 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
316 /* Typecheck: same, plus SVE registers. */ \
317 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
318 | REG_TYPE(ZN)) \
319 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
320 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
321 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
322 /* Typecheck: same, plus SVE registers. */ \
323 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
324 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
325 | REG_TYPE(ZN)) \
326 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
327 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
328 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
329 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
330 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
331 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
332 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
333 /* Typecheck: any [BHSDQ]P FP. */ \
334 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
335 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
336 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
337 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
338 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
339 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
340 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
341 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
342 be used for SVE instructions, since Zn and Pn are valid symbols \
343 in other contexts. */ \
344 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
345 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
346 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
347 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
348 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
349 | REG_TYPE(ZN) | REG_TYPE(PN)) \
350 /* Any integer register; used for error messages only. */ \
351 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
352 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
353 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
354 /* Any vector register. */ \
355 MULTI_REG_TYPE(VZ, REG_TYPE(VN) | REG_TYPE(ZN)) \
356 /* An SVE vector or predicate register. */ \
357 MULTI_REG_TYPE(ZP, REG_TYPE(ZN) | REG_TYPE(PN)) \
358 /* Any vector or predicate register. */ \
359 MULTI_REG_TYPE(VZP, REG_TYPE(VN) | REG_TYPE(ZN) | REG_TYPE(PN)) \
360 /* The whole of ZA or a single tile. */ \
361 MULTI_REG_TYPE(ZA_ZAT, REG_TYPE(ZA) | REG_TYPE(ZAT)) \
362 /* A horizontal or vertical slice of a ZA tile. */ \
363 MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV)) \
364 /* Pseudo type to mark the end of the enumerator sequence. */ \
365 END_REG_TYPE(MAX)
366
367 #undef BASIC_REG_TYPE
368 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
369 #undef MULTI_REG_TYPE
370 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
371 #undef END_REG_TYPE
372 #define END_REG_TYPE(T) BASIC_REG_TYPE(T)
373
374 /* Register type enumerators. */
375 typedef enum aarch64_reg_type_
376 {
377 /* A list of REG_TYPE_*. */
378 AARCH64_REG_TYPES
379 } aarch64_reg_type;
380
381 #undef BASIC_REG_TYPE
382 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
383 #undef REG_TYPE
384 #define REG_TYPE(T) (1 << REG_TYPE_##T)
385 #undef MULTI_REG_TYPE
386 #define MULTI_REG_TYPE(T,V) V,
387 #undef END_REG_TYPE
388 #define END_REG_TYPE(T) 0
389
390 /* Structure for a hash table entry for a register. */
391 typedef struct
392 {
393 const char *name;
394 unsigned char number;
395 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
396 unsigned char builtin;
397 } reg_entry;
398
399 /* Values indexed by aarch64_reg_type to assist the type checking. */
400 static const unsigned reg_type_masks[] =
401 {
402 AARCH64_REG_TYPES
403 };
404
405 #undef BASIC_REG_TYPE
406 #undef REG_TYPE
407 #undef MULTI_REG_TYPE
408 #undef END_REG_TYPE
409 #undef AARCH64_REG_TYPES
410
411 /* We expected one of the registers in MASK to be specified. If a register
412 of some kind was specified, SEEN is a mask that contains that register,
413 otherwise it is zero.
414
415 If it is possible to provide a relatively pithy message that describes
416 the error exactly, return a string that does so, reporting the error
417 against "operand %d". Return null otherwise.
418
419 From a QoI perspective, any REG_TYPE_* that is passed as the first
420 argument to set_expected_reg_error should generally have its own message.
421 Providing messages for combinations of such REG_TYPE_*s can be useful if
422 it is possible to summarize the combination in a relatively natural way.
423 On the other hand, it seems better to avoid long lists of unrelated
424 things. */
425
static const char *
get_reg_expected_msg (unsigned int mask, unsigned int seen)
{
  /* First handle messages that use SEEN.  These must come before the
     MASK-only comparisons below, since they refine the diagnostic using
     the register that was actually parsed.  */
  if ((mask & reg_type_masks[REG_TYPE_ZAT])
      && (seen & reg_type_masks[REG_TYPE_ZATHV]))
    return N_("expected an unsuffixed ZA tile at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_ZATHV])
      && (seen & reg_type_masks[REG_TYPE_ZAT]))
    return N_("missing horizontal or vertical suffix at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_ZA])
      && (seen & (reg_type_masks[REG_TYPE_ZAT]
		  | reg_type_masks[REG_TYPE_ZATHV])))
    return N_("expected 'za' rather than a ZA tile at operand %d");

  /* From here on, MASK is matched exactly, so at most one case applies.  */

  /* Integer, zero and stack registers.  */
  if (mask == reg_type_masks[REG_TYPE_R_64])
    return N_("expected a 64-bit integer register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_R_Z])
    return N_("expected an integer or zero register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_R_SP])
    return N_("expected an integer or stack pointer register at operand %d");

  /* Floating-point and SIMD registers.  */
  if (mask == reg_type_masks[REG_TYPE_BHSDQ])
    return N_("expected a scalar SIMD or floating-point register"
	      " at operand %d");
  if (mask == reg_type_masks[REG_TYPE_VN])
    return N_("expected an Advanced SIMD vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZN])
    return N_("expected an SVE vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_PN])
    return N_("expected an SVE predicate register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_VZ])
    return N_("expected a vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZP])
    return N_("expected an SVE vector or predicate register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_VZP])
    return N_("expected a vector or predicate register at operand %d");

  /* ZA-related registers.  */
  if (mask == reg_type_masks[REG_TYPE_ZA])
    return N_("expected a ZA array vector at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZA_ZAT])
    return N_("expected 'za' or a ZA tile at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZAT])
    return N_("expected a ZA tile at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZATHV])
    return N_("expected a ZA tile slice at operand %d");

  /* Integer and vector combos.  */
  if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_VN]))
    return N_("expected an integer register or Advanced SIMD vector register"
	      " at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_ZN]))
    return N_("expected an integer register or SVE vector register"
	      " at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_VZ]))
    return N_("expected an integer or vector register at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_PN]))
    return N_("expected an integer or predicate register at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_VZP]))
    return N_("expected an integer, vector or predicate register"
	      " at operand %d");

  /* SVE and SME combos.  */
  if (mask == (reg_type_masks[REG_TYPE_ZN] | reg_type_masks[REG_TYPE_ZATHV]))
    return N_("expected an SVE vector register or ZA tile slice"
	      " at operand %d");

  /* No pithy message for this combination; the caller falls back to a
     generic diagnostic.  */
  return NULL;
}
500
501 /* Record that we expected a register of type TYPE but didn't see one.
502 REG is the register that we actually saw, or null if we didn't see a
503 recognized register. FLAGS is SEF_IN_REGLIST if we are parsing the
504 contents of a register list, otherwise it is zero. */
505
static inline void
set_expected_reg_error (aarch64_reg_type type, const reg_entry *reg,
			unsigned int flags)
{
  assert (flags == 0 || flags == SEF_IN_REGLIST);
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
  /* Record the acceptable types in data[0] (bare operand) or data[1]
     (inside a register list), following the layout described above
     SEF_DEFAULT_ERROR.  */
  if (flags & SEF_IN_REGLIST)
    inst.parsing_error.data[1].i = reg_type_masks[type] | flags;
  else
    inst.parsing_error.data[0].i = reg_type_masks[type];
  /* data[2] records the register we actually saw, if any.  */
  if (reg)
    inst.parsing_error.data[2].i = reg_type_masks[reg->type];
}
519
520 /* Record that we expected a register list containing registers of type TYPE,
521 but didn't see the opening '{'. If we saw a register instead, REG is the
522 register that we saw, otherwise it is null. */
523
static inline void
set_expected_reglist_error (aarch64_reg_type type, const reg_entry *reg)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
  /* data[1] holds the types acceptable inside a list.  SEF_IN_REGLIST is
     deliberately not set: we never got as far as an opening '{'.  */
  inst.parsing_error.data[1].i = reg_type_masks[type];
  /* data[2] records the register we actually saw, if any.  */
  if (reg)
    inst.parsing_error.data[2].i = reg_type_masks[reg->type];
}
532
533 /* Some well known registers that we refer to directly elsewhere. */
534 #define REG_SP 31
535 #define REG_ZR 31
536
537 /* Instructions take 4 bytes in the object file. */
538 #define INSN_SIZE 4
539
540 static htab_t aarch64_ops_hsh;
541 static htab_t aarch64_cond_hsh;
542 static htab_t aarch64_shift_hsh;
543 static htab_t aarch64_sys_regs_hsh;
544 static htab_t aarch64_pstatefield_hsh;
545 static htab_t aarch64_sys_regs_ic_hsh;
546 static htab_t aarch64_sys_regs_dc_hsh;
547 static htab_t aarch64_sys_regs_at_hsh;
548 static htab_t aarch64_sys_regs_tlbi_hsh;
549 static htab_t aarch64_sys_regs_sr_hsh;
550 static htab_t aarch64_reg_hsh;
551 static htab_t aarch64_barrier_opt_hsh;
552 static htab_t aarch64_nzcv_hsh;
553 static htab_t aarch64_pldop_hsh;
554 static htab_t aarch64_hint_opt_hsh;
555
556 /* Stuff needed to resolve the label ambiguity
557 As:
558 ...
559 label: <insn>
560 may differ from:
561 ...
562 label:
563 <insn> */
564
565 static symbolS *last_label_seen;
566
567 /* Literal pool structure. Held on a per-section
568 and per-sub-section basis. */
569
570 #define MAX_LITERAL_POOL_SIZE 1024
571 typedef struct literal_expression
572 {
573 expressionS exp;
574 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
575 LITTLENUM_TYPE * bignum;
576 } literal_expression;
577
578 typedef struct literal_pool
579 {
580 literal_expression literals[MAX_LITERAL_POOL_SIZE];
581 unsigned int next_free_entry;
582 unsigned int id;
583 symbolS *symbol;
584 segT section;
585 subsegT sub_section;
586 int size;
587 struct literal_pool *next;
588 } literal_pool;
589
590 /* Pointer to a linked list of literal pools. */
591 static literal_pool *list_of_pools = NULL;
592 \f
593 /* Pure syntax. */
594
595 /* This array holds the chars that always start a comment. If the
596 pre-processor is disabled, these aren't very useful. */
597 const char comment_chars[] = "";
598
599 /* This array holds the chars that only start a comment at the beginning of
600 a line. If the line seems to have the form '# 123 filename'
601 .line and .file directives will appear in the pre-processed output. */
602 /* Note that input_file.c hand checks for '#' at the beginning of the
603 first line of the input file. This is because the compiler outputs
604 #NO_APP at the beginning of its output. */
605 /* Also note that comments like this one will always work. */
606 const char line_comment_chars[] = "#";
607
608 const char line_separator_chars[] = ";";
609
610 /* Chars that can be used to separate mant
611 from exp in floating point numbers. */
612 const char EXP_CHARS[] = "eE";
613
614 /* Chars that mean this number is a floating point constant. */
615 /* As in 0f12.456 */
616 /* or 0d1.2345e12 */
617
618 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
619
620 /* Prefix character that indicates the start of an immediate value. */
621 #define is_immediate_prefix(C) ((C) == '#')
622
623 /* Separator character handling. */
624
625 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
626
/* If the next character of *STR is C, consume it and return true;
   otherwise leave *STR unchanged and return false.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
638
639 #define skip_past_comma(str) skip_past_char (str, ',')
640
641 /* Arithmetic expressions (possibly involving symbols). */
642
643 static bool in_aarch64_get_expression = false;
644
645 /* Third argument to aarch64_get_expression. */
646 #define GE_NO_PREFIX false
647 #define GE_OPT_PREFIX true
648
649 /* Fourth argument to aarch64_get_expression. */
650 #define ALLOW_ABSENT false
651 #define REJECT_ABSENT true
652
653 /* Return TRUE if the string pointed by *STR is successfully parsed
654 as an valid expression; *EP will be filled with the information of
655 such an expression. Otherwise return FALSE.
656
657 If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
658 If REJECT_ABSENT is true then treat missing expressions as an error. */
659
static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  /* Optionally consume a leading '#'.  Remember whether one was seen:
     a '#' means the operand was definitely meant to be an expression,
     which makes a parse failure fatal rather than a template mismatch.  */
  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () reads from input_line_pointer, so temporarily point it
     at *STR; it is restored on every exit path below.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Tell md_operand () to mark unparsable operands as O_illegal.  */
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand ().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  /* a.out only supports a fixed set of sections for expressions.  */
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
719
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* AArch64 FP formats are IEEE; delegate to the generic helper,
     honouring the configured target endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
730
731 /* We handle all bad expressions here, so that we can report the faulty
732 instruction in the error message. */
733 void
734 md_operand (expressionS * exp)
735 {
736 if (in_aarch64_get_expression)
737 exp->X_op = O_illegal;
738 }
739
740 /* Immediate values. */
741
742 /* Errors may be set multiple times during parsing or bit encoding
743 (particularly in the Neon bits), but usually the earliest error which is set
744 will be the most meaningful. Avoid overwriting it with later (cascading)
745 errors by calling this function. */
746
static void
first_error (const char *error)
{
  /* Keep the earliest (most meaningful) error; drop cascading ones.  */
  if (error_p ())
    return;
  set_syntax_error (error);
}
753
754 /* Similar to first_error, but this function accepts formatted error
755 message. */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message is expected to fit in BUFFER; truncation
	 would indicate a caller bug.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      /* Saves a pointer to the static BUFFER, which stays valid until
	 the error is reported.  */
      set_syntax_error (buffer);
    }
}
778
779 /* Internal helper routine converting a vector_type_el structure *VECTYPE
780 to a corresponding operand qualifier. */
781
static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type (NT_b..NT_q map to
     0..4; the assert below guarantees the index is in range).  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type's family of vector layouts.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate suffixes ("/z", "/m") map directly.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register; QLF_S_B..QLF_S_Q are assumed to be
	 consecutive in the same order as NT_b..NT_q.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit total vector sizes are encodable.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
853
854 /* Register parsing. */
855
856 /* Generic register parser which is called by other specialized
857 register parsers.
858 CCP points to what should be the beginning of a register name.
859 If it is indeed a valid register name, advance CCP over it and
860 return the reg_entry structure; otherwise return NULL.
861 It does not issue diagnostics. */
862
863 static reg_entry *
864 parse_reg (char **ccp)
865 {
866 char *start = *ccp;
867 char *p;
868 reg_entry *reg;
869
870 #ifdef REGISTER_PREFIX
871 if (*start != REGISTER_PREFIX)
872 return NULL;
873 start++;
874 #endif
875
876 p = start;
877 if (!ISALPHA (*p) || !is_name_beginner (*p))
878 return NULL;
879
880 do
881 p++;
882 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
883
884 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
885
886 if (!reg)
887 return NULL;
888
889 *ccp = p;
890 return reg;
891 }
892
893 /* Return the operand qualifier associated with all uses of REG, or
894 AARCH64_OPND_QLF_NIL if none. AARCH64_OPND_QLF_NIL means either
895 that qualifiers don't apply to REG or that qualifiers are added
896 using suffixes. */
897
static aarch64_opnd_qualifier_t
inherent_reg_qualifier (const reg_entry *reg)
{
  switch (reg->type)
    {
    /* 32-bit general registers (w0-w30, wsp, wzr) are always "W".  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      return AARCH64_OPND_QLF_W;

    /* 64-bit general registers (x0-x30, sp, xzr) are always "X".  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      return AARCH64_OPND_QLF_X;

    case REG_TYPE_FP_B:
    case REG_TYPE_FP_H:
    case REG_TYPE_FP_S:
    case REG_TYPE_FP_D:
    case REG_TYPE_FP_Q:
      /* REG_TYPE_FP_B..FP_Q are kept consecutive (see AARCH64_REG_TYPES),
	 matching QLF_S_B..QLF_S_Q, so the qualifier is a direct offset.  */
      return AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);

    default:
      /* Qualifiers don't apply, or are expressed via suffixes.  */
      return AARCH64_OPND_QLF_NIL;
    }
}
924
925 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
926 return FALSE. */
927 static bool
928 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
929 {
930 return (reg_type_masks[type] & (1 << reg->type)) != 0;
931 }
932
933 /* Try to parse a base or offset register. Allow SVE base and offset
934 registers if REG_TYPE includes SVE registers. Return the register
935 entry on success, setting *QUALIFIER to the register qualifier.
936 Return null otherwise.
937
938 Note that this function does not issue any diagnostics. */
939
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    case REG_TYPE_ZN:
      /* SVE Zn bases are accepted only if REG_TYPE allows them, and must
	 carry an explicit ".s" or ".d" element-size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Skip over the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      /* Otherwise only general registers are valid here (any X/W
	 register, [WX]ZR or {W}SP); the qualifier follows directly from
	 the register itself.  */
      if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
	return NULL;
      *qualifier = inherent_reg_qualifier (reg);
      break;
    }

  /* Only advance the caller's pointer on success.  */
  *ccp = str;

  return reg;
}
981
982 /* Try to parse a base or offset register. Return the register entry
983 on success, setting *QUALIFIER to the register qualifier. Return null
984 otherwise.
985
986 Note that this function does not issue any diagnostics. */
987
988 static const reg_entry *
989 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
990 {
991 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
992 }
993
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* Only Advanced SIMD (V) registers take a leading element count;
     for every other register type go straight to the element size.  */
  if (reg_type != REG_TYPE_VN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  /* Map the element-size letter to its type and bit width.  */
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only valid without a count, or as "1q", on V registers;
	 otherwise fall through to the error path.  */
      if (reg_type != REG_TYPE_VN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* With an explicit count, the total vector size must be 64 or 128
     bits, except for the half-width forms 2h and 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;
  parsed_type->element_size = element_size;

  *str = ptr;

  return true;
}
1080
1081 /* *STR contains an SVE zero/merge predication suffix. Parse it into
1082 *PARSED_TYPE and point *STR at the end of the suffix. */
1083
1084 static bool
1085 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
1086 {
1087 char *ptr = *str;
1088
1089 /* Skip '/'. */
1090 gas_assert (*ptr == '/');
1091 ptr++;
1092 switch (TOLOWER (*ptr))
1093 {
1094 case 'z':
1095 parsed_type->type = NT_zero;
1096 break;
1097 case 'm':
1098 parsed_type->type = NT_merge;
1099 break;
1100 default:
1101 if (*ptr != '\0' && *ptr != ',')
1102 first_error_fmt (_("unexpected character `%c' in predication type"),
1103 *ptr);
1104 else
1105 first_error (_("missing predication type"));
1106 return false;
1107 }
1108 parsed_type->width = 0;
1109 *str = ptr + 1;
1110 return true;
1111 }
1112
1113 /* Return true if CH is a valid suffix character for registers of
1114 type TYPE. */
1115
1116 static bool
1117 aarch64_valid_suffix_char_p (aarch64_reg_type type, char ch)
1118 {
1119 switch (type)
1120 {
1121 case REG_TYPE_VN:
1122 case REG_TYPE_ZN:
1123 case REG_TYPE_ZA:
1124 case REG_TYPE_ZAT:
1125 case REG_TYPE_ZATH:
1126 case REG_TYPE_ZATV:
1127 return ch == '.';
1128
1129 case REG_TYPE_PN:
1130 return ch == '.' || ch == '/';
1131
1132 default:
1133 return false;
1134 }
1135 }
1136
1137 /* Parse an index expression at *STR, storing it in *IMM on success. */
1138
1139 static bool
1140 parse_index_expression (char **str, int64_t *imm)
1141 {
1142 expressionS exp;
1143
1144 aarch64_get_expression (&exp, str, GE_NO_PREFIX, REJECT_ABSENT);
1145 if (exp.X_op != O_constant)
1146 {
1147 first_error (_("constant expression required"));
1148 return false;
1149 }
1150 *imm = exp.X_add_number;
1151 return true;
1152 }
1153
/* Parse a register of the type TYPE.

   Return null if the string pointed to by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register, and optionally return the register
   shape and element index information in *TYPEINFO.

   FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list.

   FLAGS includes PTR_FULL_REG if the function should ignore any potential
   register index.

   FLAGS includes PTR_GOOD_MATCH if we are sufficiently far into parsing
   an operand that we can be confident that it is a good match.  */

#define PTR_IN_REGLIST (1U << 0)
#define PTR_FULL_REG (1U << 1)
#define PTR_GOOD_MATCH (1U << 2)

static const reg_entry *
parse_typed_reg (char **ccp, aarch64_reg_type type,
		 struct vector_type_el *typeinfo, unsigned int flags)
{
  char *str = *ccp;
  /* Remember whether the input even started like an identifier; used to
     distinguish a bad register name from general syntax noise when we
     are inside a register list.  */
  bool isalpha = ISALPHA (*str);
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;
  unsigned int err_flags = (flags & PTR_IN_REGLIST) ? SEF_IN_REGLIST : 0;

  /* Start from an empty shape/index description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.element_size = 0;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      if (!isalpha && (flags & PTR_IN_REGLIST))
	set_fatal_syntax_error (_("syntax error in register list"));
      else if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }
  /* From here on, use the register's own (more specific) type.  */
  type = reg->type;

  if (aarch64_valid_suffix_char_p (reg->type, *str))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return NULL;
	  /* A ZA tile name encodes the tile number; larger element
	     sizes allow fewer tiles (e.g. only ZA0.D..ZA7.D exist).  */
	  if ((reg->type == REG_TYPE_ZAT
	       || reg->type == REG_TYPE_ZATH
	       || reg->type == REG_TYPE_ZATV)
	      && reg->number * 8 >= parsetype.element_size)
	    {
	      set_syntax_error (_("ZA tile number out of range"));
	      return NULL;
	    }
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return NULL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type != REG_TYPE_VN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
    {
      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return NULL;
	}

      if (flags & PTR_IN_REGLIST)
	{
	  first_error (_("index not allowed inside register list"));
	  return NULL;
	}

      atype.defined |= NTA_HASINDEX;

      if (!parse_index_expression (&str, &atype.index))
	return NULL;

      if (! skip_past_char (&str, ']'))
	return NULL;
    }
  else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return NULL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  *ccp = str;

  return reg;
}
1303
1304 /* Parse register.
1305
1306 Return the register on success; return null otherwise.
1307
1308 If this is a NEON vector register with additional type information, fill
1309 in the struct pointed to by VECTYPE (if non-NULL).
1310
1311 This parser does not handle register lists. */
1312
1313 static const reg_entry *
1314 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1315 struct vector_type_el *vectype)
1316 {
1317 return parse_typed_reg (ccp, type, vectype, 0);
1318 }
1319
1320 static inline bool
1321 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1322 {
1323 return (e1.type == e2.type
1324 && e1.defined == e2.defined
1325 && e1.width == e2.width
1326 && e1.element_size == e2.element_size
1327 && e1.index == e2.index);
1328 }
1329
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |     0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;
  unsigned int ptr_flags = PTR_IN_REGLIST;

  if (*str != '{')
    {
      set_expected_reglist_error (type, parse_reg (&str));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.element_size = 0;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  /* Parse registers, or register ranges Vm-Vn, separated by commas.
     The loop condition below sets IN_RANGE when the next separator is
     '-' rather than ','.  */
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
					      ptr_flags);
      if (!reg)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      val = reg->number;
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* Ranges must be ascending.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* Every entry must have the same shape/index info as the
	     first one.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Pack each register number (or each number of a range) into the
	 encoded result, 5 bits per register.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
      ptr_flags |= PTR_GOOD_MATCH;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* A trailing [index] applies to the whole list and is required when
     any entry carried an index.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  if (!parse_index_expression (&str, &typeinfo_first.index))
	    error = true;
	  if (! skip_past_char (&str, ']'))
	    error = true;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1486
1487 /* Directives: register aliases. */
1488
1489 static reg_entry *
1490 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1491 {
1492 reg_entry *new;
1493 const char *name;
1494
1495 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1496 {
1497 if (new->builtin)
1498 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1499 str);
1500
1501 /* Only warn about a redefinition if it's not defined as the
1502 same register. */
1503 else if (new->number != number || new->type != type)
1504 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1505
1506 return NULL;
1507 }
1508
1509 name = xstrdup (str);
1510 new = XNEW (reg_entry);
1511
1512 new->name = name;
1513 new->number = number;
1514 new->type = type;
1515 new->builtin = false;
1516
1517 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1518
1519 return new;
1520 }
1521
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  /* Step over " .req " to the existing register name.  */
  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the upper-case variant if it differs from the name as
	 written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	     foo .req r0
	     Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise for the lower-case variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1601
/* Pseudo-op handler for a ".req" appearing at the start of a line.
   Should never be reached with valid input, as .req goes between the
   alias and the register name, not at the beginning of the line; so
   this only ever reports a syntax error.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1609
1610 /* The .unreq directive deletes an alias which was previously defined
1611 by .req. For example:
1612
1613 my_alias .req r11
1614 .unreq my_alias */
1615
1616 static void
1617 s_unreq (int a ATTRIBUTE_UNUSED)
1618 {
1619 char *name;
1620 char saved_char;
1621
1622 name = input_line_pointer;
1623 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1624 saved_char = *input_line_pointer;
1625 *input_line_pointer = 0;
1626
1627 if (!*name)
1628 as_bad (_("invalid syntax for .unreq directive"));
1629 else
1630 {
1631 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1632
1633 if (!reg)
1634 as_bad (_("unknown register alias '%s'"), name);
1635 else if (reg->builtin)
1636 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1637 name);
1638 else
1639 {
1640 char *p;
1641 char *nbuf;
1642
1643 str_hash_delete (aarch64_reg_hsh, name);
1644 free ((char *) reg->name);
1645 free (reg);
1646
1647 /* Also locate the all upper case and all lower case versions.
1648 Do not complain if we cannot find one or the other as it
1649 was probably deleted above. */
1650
1651 nbuf = strdup (name);
1652 for (p = nbuf; *p; p++)
1653 *p = TOUPPER (*p);
1654 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1655 if (reg)
1656 {
1657 str_hash_delete (aarch64_reg_hsh, nbuf);
1658 free ((char *) reg->name);
1659 free (reg);
1660 }
1661
1662 for (p = nbuf; *p; p++)
1663 *p = TOLOWER (*p);
1664 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1665 if (reg)
1666 {
1667 str_hash_delete (aarch64_reg_hsh, nbuf);
1668 free ((char *) reg->name);
1669 free (reg);
1670 }
1671
1672 free (nbuf);
1673 }
1674 }
1675
1676 *input_line_pointer = saved_char;
1677 demand_empty_rest_of_line ();
1678 }
1679
1680 /* Directives: Instruction set selection. */
1681
1682 #if defined OBJ_ELF || defined OBJ_COFF
/* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
   spec.  (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
   Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
   and $d has type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */

/* Create a new mapping symbol for the transition to STATE.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Choose the mapping-symbol name ($d for data, $x for code) and its
     BFD flags.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* A previous mapping symbol at the same offset is superseded by
	 the new one.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1743
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* The symbol being removed may also be the frag's first mapping
	 symbol; keep the cached pointers consistent.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding as data, then restore STATE after the padded
     BYTES.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1771
static void mapping_state_2 (enum mstate state, int max_chars);

/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* TRANSITION is true when moving from state FROM to state TO.  */
#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1815
1816 /* Same as mapping_state, but MAX_CHARS bytes have already been
1817 allocated. Put the mapping symbol that far back. */
1818
1819 static void
1820 mapping_state_2 (enum mstate state, int max_chars)
1821 {
1822 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1823
1824 if (!SEG_NORMAL (now_seg))
1825 return;
1826
1827 if (mapstate == state)
1828 /* The mapping symbol has already been emitted.
1829 There is nothing else to do. */
1830 return;
1831
1832 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1833 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1834 }
1835 #else
1836 #define mapping_state(x) /* nothing */
1837 #define mapping_state_2(x, y) /* nothing */
1838 #endif
1839
1840 /* Directives: sectioning and alignment. */
1841
/* Handler for the .bss directive: switch output to the BSS section.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1851
/* Handler for the .even directive: align to a 2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1863
1864 /* Directives: Literal pools. */
1865
1866 static literal_pool *
1867 find_literal_pool (int size)
1868 {
1869 literal_pool *pool;
1870
1871 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1872 {
1873 if (pool->section == now_seg
1874 && pool->sub_section == now_subseg && pool->size == size)
1875 break;
1876 }
1877
1878 return pool;
1879 }
1880
/* Return the literal pool with entry size SIZE for the current
   (sub)section, creating and registering one if none exists yet.  */
static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = XNEW (literal_pool);
      /* NOTE(review): XNEW is xmalloc-based and aborts on allocation
	 failure rather than returning NULL, so this check looks
	 unreachable -- confirm.  */
      if (!pool)
	return NULL;

      /* Currently we always put the literal pool in the current text
	 section.  If we were generating "small" model code where we
	 knew that all code and initialised data was within 1MB then
	 we could output literals to mergeable, read-only data
	 sections.  */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    &zero_address_frag, 0);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}
1925
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  On success, *EXP is
   rewritten to a symbolic reference to the pool entry.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool, so identical
     constants and symbol references share a single entry.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP as pool_symbol + byte offset of the entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1985
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its name.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
2036
2037
/* Handler for the .ltorg directive: dump all pending literal pools
   (4-byte, 8-byte and 16-byte entries) at the current location.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Walk the pools by entry size: 2^2 = 4, 2^3 = 8, 2^4 = 16 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* Give the pool's placeholder symbol its real name and location
	 (the \002 byte keeps the name out of the user namespace).  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
2096
2097 #if defined(OBJ_ELF) || defined(OBJ_COFF)
2098 /* Forward declarations for functions below, in the MD interface
2099 section. */
2100 static struct reloc_table_entry * find_reloc_table_entry (char **);
2101
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Handler for data directives (.word etc.) that emit NBYTES per
   expression, rejecting :reloc: suffixes for now.  */
static void
s_aarch64_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  /* Emit each comma-separated expression in turn.  */
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbolic value may carry a "#:reloc:" suffix; recognise
	     it so a meaningful diagnostic can be given, but emitting
	     such relocations is not implemented yet.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2157 #endif
2158
2159 #ifdef OBJ_ELF
2160 /* Forward declarations for functions below, in the MD interface
2161 section. */
2162 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
2163
2164 /* Mark symbol that it follows a variant PCS convention. */
2165
static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  /* Read the symbol name operand; C is the delimiter that terminated it
     and must be pushed back afterwards.  */
  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  /* NOTE(review): after the empty-name error we still fall through and
     create/look up the (empty-named) symbol -- an early return here may
     be preferable; confirm intended behaviour.  */
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();
  /* Flag the ELF symbol as following a variant procedure-call standard
     by setting STO_AARCH64_VARIANT_PCS in st_other.  */
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
2186 #endif /* OBJ_ELF */
2187
2188 /* Output a 32-bit word, but mark as an instruction. */
2189
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;	/* Count of 32-bit words emitted, for DWARF.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* ".inst" with no operands is accepted and emits nothing.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there
     is no MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      /* Each operand must be a constant instruction word.  */
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* On big-endian targets byte-swap the word so it is stored in
	 little-endian order, matching normally-assembled instructions.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Record the emitted words as instructions for DWARF line info.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2244
2245 static void
2246 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2247 {
2248 demand_empty_rest_of_line ();
2249 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2250 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2251 }
2252
2253 #ifdef OBJ_ELF
2254 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2255
static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* EXP names the TLS symbol the descriptor sequence refers to.  */
  expression (&exp);
  /* Reserve room so the fixup lands in the same frag as the ADD
     instruction, which must immediately follow this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2268
2269 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2270
static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  /* Attach the reloc at the current output position (frag_more (0)),
     i.e. on the BLR instruction about to be assembled.  */
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2288
2289 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2290
static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* EXP names the TLS symbol the descriptor sequence refers to.  */
  expression (&exp);
  /* Reserve room so the fixup lands in the same frag as the LDR
     instruction, which must immediately follow this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2303 #endif /* OBJ_ELF */
2304
2305 #ifdef TE_PE
2306 static void
2307 s_secrel (int dummy ATTRIBUTE_UNUSED)
2308 {
2309 expressionS exp;
2310
2311 do
2312 {
2313 expression (&exp);
2314 if (exp.X_op == O_symbol)
2315 exp.X_op = O_secrel;
2316
2317 emit_expr (&exp, 4);
2318 }
2319 while (*input_line_pointer++ == ',');
2320
2321 input_line_pointer--;
2322 demand_empty_rest_of_line ();
2323 }
2324
2325 void
2326 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
2327 {
2328 expressionS exp;
2329
2330 exp.X_op = O_secrel;
2331 exp.X_add_symbol = symbol;
2332 exp.X_add_number = 0;
2333 emit_expr (&exp, size);
2334 }
2335
2336 static void
2337 s_secidx (int dummy ATTRIBUTE_UNUSED)
2338 {
2339 expressionS exp;
2340
2341 do
2342 {
2343 expression (&exp);
2344 if (exp.X_op == O_symbol)
2345 exp.X_op = O_secidx;
2346
2347 emit_expr (&exp, 2);
2348 }
2349 while (*input_line_pointer++ == ',');
2350
2351 input_line_pointer--;
2352 demand_empty_rest_of_line ();
2353 }
2354 #endif /* TE_PE */
2355
2356 static void s_aarch64_arch (int);
2357 static void s_aarch64_cpu (int);
2358 static void s_aarch64_arch_extension (int);
2359
2360 /* This table describes all the machine specific pseudo-ops the assembler
2361 has to support. The fields are:
2362 pseudo-op name without dot
2363 function to call to execute this pseudo-op
2364 Integer arg to pass to the function. */
2365
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".pool" is a synonym for ".ltorg": dump the literal pool here.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* The integer argument is the byte size passed to s_aarch64_cons.  */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  /* The argument is the float_cons format letter ('h' and 'b' here).  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2399 \f
2400
2401 /* Check whether STR points to a register name followed by a comma or the
2402 end of line; REG_TYPE indicates which register types are checked
2403 against. Return TRUE if STR is such a register name; otherwise return
2404 FALSE. The function does not intend to produce any diagnostics, but since
2405 the register parser aarch64_reg_parse, which is called by this function,
2406 does produce diagnostics, we call clear_error to clear any diagnostics
2407 that may be generated by aarch64_reg_parse.
2408 Also, the function returns FALSE directly if there is any user error
2409 present at the function entry. This prevents the existing diagnostics
2410 state from being spoiled.
2411 The function currently serves parse_constant_immediate and
2412 parse_big_immediate only. */
2413 static bool
2414 reg_name_p (char *str, aarch64_reg_type reg_type)
2415 {
2416 const reg_entry *reg;
2417
2418 /* Prevent the diagnostics state from being spoiled. */
2419 if (error_p ())
2420 return false;
2421
2422 reg = aarch64_reg_parse (&str, reg_type, NULL);
2423
2424 /* Clear the parsing error that may be set by the reg parser. */
2425 clear_error ();
2426
2427 if (!reg)
2428 return false;
2429
2430 skip_whitespace (str);
2431 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2432 return true;
2433
2434 return false;
2435 }
2436
2437 /* Parser functions used exclusively in instruction operands. */
2438
2439 /* Parse an immediate expression which may not be constant.
2440
2441 To prevent the expression parser from pushing a register name
2442 into the symbol table as an undefined symbol, firstly a check is
2443 done to find out whether STR is a register of type REG_TYPE followed
2444 by a comma or the end of line. Return FALSE if STR is such a string. */
2445
2446 static bool
2447 parse_immediate_expression (char **str, expressionS *exp,
2448 aarch64_reg_type reg_type)
2449 {
2450 if (reg_name_p (*str, reg_type))
2451 {
2452 set_recoverable_error (_("immediate operand required"));
2453 return false;
2454 }
2455
2456 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2457
2458 if (exp->X_op == O_absent)
2459 {
2460 set_fatal_syntax_error (_("missing immediate expression"));
2461 return false;
2462 }
2463
2464 return true;
2465 }
2466
2467 /* Constant immediate-value read function for use in insn parsing.
2468 STR points to the beginning of the immediate (with the optional
2469 leading #); *VAL receives the value. REG_TYPE says which register
2470 names should be treated as registers rather than as symbolic immediates.
2471
2472 Return TRUE on success; otherwise return FALSE. */
2473
2474 static bool
2475 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2476 {
2477 expressionS exp;
2478
2479 if (! parse_immediate_expression (str, &exp, reg_type))
2480 return false;
2481
2482 if (exp.X_op != O_constant)
2483 {
2484 set_syntax_error (_("constant expression required"));
2485 return false;
2486 }
2487
2488 *val = exp.X_add_number;
2489 return true;
2490 }
2491
/* Pack the relevant bits of the IEEE single-precision word IMM into the
   AArch64 8-bit floating-point immediate encoding.  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31]    -> b[7].  */

  return sign | low7;
}
2498
2499 /* Return TRUE if the single-precision floating-point value encoded in IMM
2500 can be expressed in the AArch64 8-bit signed floating-point format with
2501 3-bit exponent and normalized 4 bits of precision; in other words, the
2502 floating-point value must be expressable as
2503 (+/-) n / 16 * power (2, r)
2504 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2505
static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* A single-precision value is expressible in the AArch64 8-bit FP
     format exactly when its bit pattern is:

       3 32222222 2221111111111
       1 09876543 21098765432109876543210
       n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are 0 or 1 independently, with E == ~e --
     i.e. the low 19 bits are zero and bits 29..25 all equal the
     complement of bit 30.  */

  uint32_t expected;

  /* The low 19 mantissa bits must be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 29..25 must be the complement of bit 30.  */
  expected = ((imm >> 30) & 0x1) ? 0x40000000 : 0x3e000000;
  return (imm & 0x7e000000) == expected;
}
2531
2532 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2533 as an IEEE float without any loss of precision. Store the value in
2534 *FPWORD if so. */
2535
static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  uint32_t hi = (uint32_t) (imm >> 32);
  uint32_t lo = (uint32_t) imm;
  uint32_t exp_pattern;

  /* The 29 low-order mantissa bits must be zero, or precision would be
     lost in the conversion.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* Exponent bits 62..59 must be the triple-complement of bit 62, i.e.
     the exponent must fit in the float's 8-bit field.  */
  exp_pattern = ((hi >> 30) & 0x1) ? 0x40000000 : 0x38000000;
  if ((hi & 0x78000000) != exp_pattern)
    return false;

  /* Reject the case where the float exponent would be all-ones.  */
  if ((hi & 0x7ff00000) == 0x47f00000)
    return false;

  /* Reassemble: sign + top exponent bit, then 7 exponent and 20 mantissa
     bits shifted up, then the remaining 3 mantissa bits from the low
     word.  */
  *fpword = (hi & 0xc0000000)
	    | ((hi << 3) & 0x3ffffff8)
	    | (lo >> 29);
  return true;
}
2579
2580 /* Return true if we should treat OPERAND as a double-precision
2581 floating-point operand rather than a single-precision one. */
2582 static bool
2583 double_precision_operand_p (const aarch64_opnd_info *operand)
2584 {
2585 /* Check for unsuffixed SVE registers, which are allowed
2586 for LDR and STR but not in instructions that require an
2587 immediate. We get better error messages if we arbitrarily
2588 pick one size, parse the immediate normally, and then
2589 report the match failure in the normal way. */
2590 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2591 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2592 }
2593
2594 /* Parse a floating-point immediate. Return TRUE on success and return the
2595 value in *IMMED in the format of IEEE754 single-precision encoding.
2596 *CCP points to the start of the string; DP_P is TRUE when the immediate
2597 is expected to be in double-precision (N.B. this only matters when
2598 hexadecimal representation is involved). REG_TYPE says which register
2599 names should be treated as registers rather than as symbolic immediates.
2600
2601 This routine accepts any IEEE float; it is up to the callers to reject
2602 invalid ones. */
2603
static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;	/* True once a 0x-form encoding has been taken.  */

  /* The leading '#' is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* A double-precision pattern must convert to float exactly.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision pattern must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A bare register name means the immediate was omitted.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal form: let atof parse it as single precision.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2669
2670 /* Less-generic immediate-value read function with the possibility of loading
2671 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2672 instructions.
2673
2674 To prevent the expression parser from pushing a register name into the
2675 symbol table as an undefined symbol, a check is firstly done to find
2676 out whether STR is a register of type REG_TYPE followed by a comma or
2677 the end of line. Return FALSE if STR is such a register. */
2678
static bool
parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
{
  char *ptr = *str;

  /* A bare register name means the immediate was omitted.  */
  if (reg_name_p (ptr, reg_type))
    {
      set_syntax_error (_("immediate operand required"));
      return false;
    }

  aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);

  /* Note: when the expression is not a constant (e.g. symbolic), *IMM is
     left untouched; the value is expected to be picked up later from
     inst.reloc.exp via a fixup.  */
  if (inst.reloc.exp.X_op == O_constant)
    *imm = inst.reloc.exp.X_add_number;

  *str = ptr;

  return true;
}
2699
2700 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2701 if NEED_LIBOPCODES is non-zero, the fixup will need
2702 assistance from the libopcodes. */
2703
2704 static inline void
2705 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2706 const aarch64_opnd_info *operand,
2707 int need_libopcodes_p)
2708 {
2709 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2710 reloc->opnd = operand->type;
2711 if (need_libopcodes_p)
2712 reloc->need_libopcodes_p = 1;
2713 };
2714
2715 /* Return TRUE if the instruction needs to be fixed up later internally by
2716 the GAS; otherwise return FALSE. */
2717
static inline bool
aarch64_gas_internal_fixup_p (void)
{
  /* INST is the global describing the instruction being assembled.  */
  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
2723
2724 /* Assign the immediate value to the relevant field in *OPERAND if
2725 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2726 needs an internal fixup in a later stage.
2727 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2728 IMM.VALUE that may get assigned with the constant. */
2729 static inline void
2730 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2731 aarch64_opnd_info *operand,
2732 int addr_off_p,
2733 int need_libopcodes_p,
2734 int skip_p)
2735 {
2736 if (reloc->exp.X_op == O_constant)
2737 {
2738 if (addr_off_p)
2739 operand->addr.offset.imm = reloc->exp.X_add_number;
2740 else
2741 operand->imm.value = reloc->exp.X_add_number;
2742 reloc->type = BFD_RELOC_UNUSED;
2743 }
2744 else
2745 {
2746 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2747 /* Tell libopcodes to ignore this operand or not. This is helpful
2748 when one of the operands needs to be fixed up later but we need
2749 libopcodes to check the other operands. */
2750 operand->skip = skip_p;
2751 }
2752 }
2753
2754 /* Relocation modifiers. Each entry in the table contains the textual
2755 name for the relocation which may be placed before a symbol used as
2756 a load/store offset, or add immediate. It must be surrounded by a
2757 leading and trailing colon, for example:
2758
2759 ldr x0, [x1, #:rello:varsym]
2760 add x0, x1, #:rello:varsym */
2761
struct reloc_table_entry
{
  const char *name;		/* Modifier text, without the colons.  */
  int pc_rel;			/* Non-zero if the reloc is PC-relative.  */
  bfd_reloc_code_real_type adr_type;	    /* Reloc for ADR.  */
  bfd_reloc_code_real_type adrp_type;	    /* Reloc for ADRP.  */
  bfd_reloc_code_real_type movw_type;	    /* Reloc for MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;	    /* Reloc for ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;	    /* Reloc for LDR/STR offset.  */
  bfd_reloc_code_real_type ld_literal_type; /* Reloc for LDR literal.  */
};
2773
2774 static struct reloc_table_entry reloc_table[] =
2775 {
2776 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2777 {"lo12", 0,
2778 0, /* adr_type */
2779 0,
2780 0,
2781 BFD_RELOC_AARCH64_ADD_LO12,
2782 BFD_RELOC_AARCH64_LDST_LO12,
2783 0},
2784
2785 /* Higher 21 bits of pc-relative page offset: ADRP */
2786 {"pg_hi21", 1,
2787 0, /* adr_type */
2788 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2789 0,
2790 0,
2791 0,
2792 0},
2793
2794 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2795 {"pg_hi21_nc", 1,
2796 0, /* adr_type */
2797 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2798 0,
2799 0,
2800 0,
2801 0},
2802
2803 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2804 {"abs_g0", 0,
2805 0, /* adr_type */
2806 0,
2807 BFD_RELOC_AARCH64_MOVW_G0,
2808 0,
2809 0,
2810 0},
2811
2812 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2813 {"abs_g0_s", 0,
2814 0, /* adr_type */
2815 0,
2816 BFD_RELOC_AARCH64_MOVW_G0_S,
2817 0,
2818 0,
2819 0},
2820
2821 /* Less significant bits 0-15 of address/value: MOVK, no check */
2822 {"abs_g0_nc", 0,
2823 0, /* adr_type */
2824 0,
2825 BFD_RELOC_AARCH64_MOVW_G0_NC,
2826 0,
2827 0,
2828 0},
2829
2830 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2831 {"abs_g1", 0,
2832 0, /* adr_type */
2833 0,
2834 BFD_RELOC_AARCH64_MOVW_G1,
2835 0,
2836 0,
2837 0},
2838
2839 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2840 {"abs_g1_s", 0,
2841 0, /* adr_type */
2842 0,
2843 BFD_RELOC_AARCH64_MOVW_G1_S,
2844 0,
2845 0,
2846 0},
2847
2848 /* Less significant bits 16-31 of address/value: MOVK, no check */
2849 {"abs_g1_nc", 0,
2850 0, /* adr_type */
2851 0,
2852 BFD_RELOC_AARCH64_MOVW_G1_NC,
2853 0,
2854 0,
2855 0},
2856
2857 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2858 {"abs_g2", 0,
2859 0, /* adr_type */
2860 0,
2861 BFD_RELOC_AARCH64_MOVW_G2,
2862 0,
2863 0,
2864 0},
2865
2866 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2867 {"abs_g2_s", 0,
2868 0, /* adr_type */
2869 0,
2870 BFD_RELOC_AARCH64_MOVW_G2_S,
2871 0,
2872 0,
2873 0},
2874
2875 /* Less significant bits 32-47 of address/value: MOVK, no check */
2876 {"abs_g2_nc", 0,
2877 0, /* adr_type */
2878 0,
2879 BFD_RELOC_AARCH64_MOVW_G2_NC,
2880 0,
2881 0,
2882 0},
2883
2884 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2885 {"abs_g3", 0,
2886 0, /* adr_type */
2887 0,
2888 BFD_RELOC_AARCH64_MOVW_G3,
2889 0,
2890 0,
2891 0},
2892
2893 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2894 {"prel_g0", 1,
2895 0, /* adr_type */
2896 0,
2897 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2898 0,
2899 0,
2900 0},
2901
2902 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2903 {"prel_g0_nc", 1,
2904 0, /* adr_type */
2905 0,
2906 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2907 0,
2908 0,
2909 0},
2910
2911 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2912 {"prel_g1", 1,
2913 0, /* adr_type */
2914 0,
2915 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2916 0,
2917 0,
2918 0},
2919
2920 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2921 {"prel_g1_nc", 1,
2922 0, /* adr_type */
2923 0,
2924 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2925 0,
2926 0,
2927 0},
2928
2929 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2930 {"prel_g2", 1,
2931 0, /* adr_type */
2932 0,
2933 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2934 0,
2935 0,
2936 0},
2937
2938 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2939 {"prel_g2_nc", 1,
2940 0, /* adr_type */
2941 0,
2942 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2943 0,
2944 0,
2945 0},
2946
2947 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2948 {"prel_g3", 1,
2949 0, /* adr_type */
2950 0,
2951 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2952 0,
2953 0,
2954 0},
2955
2956 /* Get to the page containing GOT entry for a symbol. */
2957 {"got", 1,
2958 0, /* adr_type */
2959 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2960 0,
2961 0,
2962 0,
2963 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2964
2965 /* 12 bit offset into the page containing GOT entry for that symbol. */
2966 {"got_lo12", 0,
2967 0, /* adr_type */
2968 0,
2969 0,
2970 0,
2971 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2972 0},
2973
2974 /* 0-15 bits of address/value: MOVk, no check. */
2975 {"gotoff_g0_nc", 0,
2976 0, /* adr_type */
2977 0,
2978 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2979 0,
2980 0,
2981 0},
2982
2983 /* Most significant bits 16-31 of address/value: MOVZ. */
2984 {"gotoff_g1", 0,
2985 0, /* adr_type */
2986 0,
2987 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2988 0,
2989 0,
2990 0},
2991
2992 /* 15 bit offset into the page containing GOT entry for that symbol. */
2993 {"gotoff_lo15", 0,
2994 0, /* adr_type */
2995 0,
2996 0,
2997 0,
2998 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2999 0},
3000
3001 /* Get to the page containing GOT TLS entry for a symbol */
3002 {"gottprel_g0_nc", 0,
3003 0, /* adr_type */
3004 0,
3005 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
3006 0,
3007 0,
3008 0},
3009
3010 /* Get to the page containing GOT TLS entry for a symbol */
3011 {"gottprel_g1", 0,
3012 0, /* adr_type */
3013 0,
3014 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
3015 0,
3016 0,
3017 0},
3018
3019 /* Get to the page containing GOT TLS entry for a symbol */
3020 {"tlsgd", 0,
3021 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
3022 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
3023 0,
3024 0,
3025 0,
3026 0},
3027
3028 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3029 {"tlsgd_lo12", 0,
3030 0, /* adr_type */
3031 0,
3032 0,
3033 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
3034 0,
3035 0},
3036
3037 /* Lower 16 bits address/value: MOVk. */
3038 {"tlsgd_g0_nc", 0,
3039 0, /* adr_type */
3040 0,
3041 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
3042 0,
3043 0,
3044 0},
3045
3046 /* Most significant bits 16-31 of address/value: MOVZ. */
3047 {"tlsgd_g1", 0,
3048 0, /* adr_type */
3049 0,
3050 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
3051 0,
3052 0,
3053 0},
3054
3055 /* Get to the page containing GOT TLS entry for a symbol */
3056 {"tlsdesc", 0,
3057 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
3058 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
3059 0,
3060 0,
3061 0,
3062 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
3063
3064 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3065 {"tlsdesc_lo12", 0,
3066 0, /* adr_type */
3067 0,
3068 0,
3069 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
3070 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
3071 0},
3072
3073 /* Get to the page containing GOT TLS entry for a symbol.
3074 The same as GD, we allocate two consecutive GOT slots
3075 for module index and module offset, the only difference
3076 with GD is the module offset should be initialized to
3077 zero without any outstanding runtime relocation. */
3078 {"tlsldm", 0,
3079 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
3080 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
3081 0,
3082 0,
3083 0,
3084 0},
3085
3086 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3087 {"tlsldm_lo12_nc", 0,
3088 0, /* adr_type */
3089 0,
3090 0,
3091 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
3092 0,
3093 0},
3094
3095 /* 12 bit offset into the module TLS base address. */
3096 {"dtprel_lo12", 0,
3097 0, /* adr_type */
3098 0,
3099 0,
3100 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
3101 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
3102 0},
3103
3104 /* Same as dtprel_lo12, no overflow check. */
3105 {"dtprel_lo12_nc", 0,
3106 0, /* adr_type */
3107 0,
3108 0,
3109 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
3110 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
3111 0},
3112
3113 /* bits[23:12] of offset to the module TLS base address. */
3114 {"dtprel_hi12", 0,
3115 0, /* adr_type */
3116 0,
3117 0,
3118 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
3119 0,
3120 0},
3121
3122 /* bits[15:0] of offset to the module TLS base address. */
3123 {"dtprel_g0", 0,
3124 0, /* adr_type */
3125 0,
3126 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
3127 0,
3128 0,
3129 0},
3130
3131 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
3132 {"dtprel_g0_nc", 0,
3133 0, /* adr_type */
3134 0,
3135 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
3136 0,
3137 0,
3138 0},
3139
3140 /* bits[31:16] of offset to the module TLS base address. */
3141 {"dtprel_g1", 0,
3142 0, /* adr_type */
3143 0,
3144 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
3145 0,
3146 0,
3147 0},
3148
3149 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
3150 {"dtprel_g1_nc", 0,
3151 0, /* adr_type */
3152 0,
3153 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
3154 0,
3155 0,
3156 0},
3157
3158 /* bits[47:32] of offset to the module TLS base address. */
3159 {"dtprel_g2", 0,
3160 0, /* adr_type */
3161 0,
3162 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
3163 0,
3164 0,
3165 0},
3166
3167 /* Lower 16 bit offset into GOT entry for a symbol */
3168 {"tlsdesc_off_g0_nc", 0,
3169 0, /* adr_type */
3170 0,
3171 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
3172 0,
3173 0,
3174 0},
3175
3176 /* Higher 16 bit offset into GOT entry for a symbol */
3177 {"tlsdesc_off_g1", 0,
3178 0, /* adr_type */
3179 0,
3180 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
3181 0,
3182 0,
3183 0},
3184
3185 /* Get to the page containing GOT TLS entry for a symbol */
3186 {"gottprel", 0,
3187 0, /* adr_type */
3188 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
3189 0,
3190 0,
3191 0,
3192 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
3193
3194 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3195 {"gottprel_lo12", 0,
3196 0, /* adr_type */
3197 0,
3198 0,
3199 0,
3200 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
3201 0},
3202
3203 /* Get tp offset for a symbol. */
3204 {"tprel", 0,
3205 0, /* adr_type */
3206 0,
3207 0,
3208 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3209 0,
3210 0},
3211
3212 /* Get tp offset for a symbol. */
3213 {"tprel_lo12", 0,
3214 0, /* adr_type */
3215 0,
3216 0,
3217 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3218 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
3219 0},
3220
3221 /* Get tp offset for a symbol. */
3222 {"tprel_hi12", 0,
3223 0, /* adr_type */
3224 0,
3225 0,
3226 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
3227 0,
3228 0},
3229
3230 /* Get tp offset for a symbol. */
3231 {"tprel_lo12_nc", 0,
3232 0, /* adr_type */
3233 0,
3234 0,
3235 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
3236 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
3237 0},
3238
3239 /* Most significant bits 32-47 of address/value: MOVZ. */
3240 {"tprel_g2", 0,
3241 0, /* adr_type */
3242 0,
3243 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
3244 0,
3245 0,
3246 0},
3247
3248 /* Most significant bits 16-31 of address/value: MOVZ. */
3249 {"tprel_g1", 0,
3250 0, /* adr_type */
3251 0,
3252 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3253 0,
3254 0,
3255 0},
3256
3257 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
3258 {"tprel_g1_nc", 0,
3259 0, /* adr_type */
3260 0,
3261 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3262 0,
3263 0,
3264 0},
3265
3266 /* Most significant bits 0-15 of address/value: MOVZ. */
3267 {"tprel_g0", 0,
3268 0, /* adr_type */
3269 0,
3270 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3271 0,
3272 0,
3273 0},
3274
3275 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3276 {"tprel_g0_nc", 0,
3277 0, /* adr_type */
3278 0,
3279 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3280 0,
3281 0,
3282 0},
3283
3284 /* 15bit offset from got entry to base address of GOT table. */
3285 {"gotpage_lo15", 0,
3286 0,
3287 0,
3288 0,
3289 0,
3290 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3291 0},
3292
3293 /* 14bit offset from got entry to base address of GOT table. */
3294 {"gotpage_lo14", 0,
3295 0,
3296 0,
3297 0,
3298 0,
3299 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3300 0},
3301 };
3302
3303 /* Given the address of a pointer pointing to the textual name of a
3304 relocation as may appear in assembler source, attempt to find its
3305 details in reloc_table. The pointer will be updated to the character
3306 after the trailing colon. On failure, NULL will be returned;
3307 otherwise return the reloc_table_entry. */
3308
3309 static struct reloc_table_entry *
3310 find_reloc_table_entry (char **str)
3311 {
3312 unsigned int i;
3313 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3314 {
3315 int length = strlen (reloc_table[i].name);
3316
3317 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3318 && (*str)[length] == ':')
3319 {
3320 *str += (length + 1);
3321 return &reloc_table[i];
3322 }
3323 }
3324
3325 return NULL;
3326 }
3327
/* Classify relocation TYPE for forcing purposes.
   Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK (the generic logic decides).  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
         ilp32_p.  */
      return 1;

    /* GOT, TLS and PC-relative page/low-12 relocations: the final
       value is only known at link time, so the fixup must survive
       as a relocation.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* No hard requirement; the caller may apply generic policy.  */
      return -1;
    }
}
3430
3431 int
3432 aarch64_force_relocation (struct fix *fixp)
3433 {
3434 int res = aarch64_force_reloc (fixp->fx_r_type);
3435
3436 if (res == -1)
3437 return generic_force_reloc (fixp);
3438 return res;
3439 }
3440
/* Mode argument to parse_shift and parser_shifter_operand.  Selects
   which shift/extend operators a given operand position accepts.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}" */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm" */
  SHIFTED_LSL,			/* bare "lsl #n" */
  SHIFTED_MUL,			/* bare "mul #n" */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n" */
  SHIFTED_MUL_VL,		/* "mul vl" (SVE vector-length multiple) */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n */
};
3455
/* Parse a <shift> operator on an AArch64 data processing instruction.
   MODE constrains which operators are acceptable (see enum
   parse_shift_mode).  On success the operator kind and (optional)
   amount are recorded in OPERAND->shifter and *STR is advanced past
   the consumed text.  Return TRUE on success; otherwise return FALSE
   (with a syntax error recorded and *STR unchanged).  */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan the alphabetic operator name ("lsl", "uxtw", "mul", ...).  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the name up in the shift-operator hash table.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only valid for the SIMD immediate forms (MOVI etc.).  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  /* MUL is only valid in the SVE "mul #n" / "mul vl" positions.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Mode-specific restrictions on which operator kinds are legal.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  /* "VL" must not be a prefix of a longer identifier.  */
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  The amount is optional for extend operators
     (defaulting to 0 below) and absent in "[... ]" and "MUL VL".  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* A bare '#' with no amount, or a missing amount on a plain
	 shift operator, is an error.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3628
3629 /* Parse a <shifter_operand> for a data processing instruction:
3630
3631 #<immediate>
3632 #<immediate>, LSL #imm
3633
3634 Validation of immediate operands is deferred to md_apply_fix.
3635
3636 Return TRUE on success; otherwise return FALSE. */
3637
3638 static bool
3639 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3640 enum parse_shift_mode mode)
3641 {
3642 char *p;
3643
3644 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3645 return false;
3646
3647 p = *str;
3648
3649 /* Accept an immediate expression. */
3650 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
3651 REJECT_ABSENT))
3652 return false;
3653
3654 /* Accept optional LSL for arithmetic immediate values. */
3655 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3656 if (! parse_shift (&p, operand, SHIFTED_LSL))
3657 return false;
3658
3659 /* Not accept any shifter for logical immediate values. */
3660 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3661 && parse_shift (&p, operand, mode))
3662 {
3663 set_syntax_error (_("unexpected shift operator"));
3664 return false;
3665 }
3666
3667 *str = p;
3668 return true;
3669 }
3670
3671 /* Parse a <shifter_operand> for a data processing instruction:
3672
3673 <Rm>
3674 <Rm>, <shift>
3675 #<immediate>
3676 #<immediate>, LSL #imm
3677
3678 where <shift> is handled by parse_shift above, and the last two
3679 cases are handled by the function above.
3680
3681 Validation of immediate operands is deferred to md_apply_fix.
3682
3683 Return TRUE on success; otherwise return FALSE. */
3684
3685 static bool
3686 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3687 enum parse_shift_mode mode)
3688 {
3689 const reg_entry *reg;
3690 aarch64_opnd_qualifier_t qualifier;
3691 enum aarch64_operand_class opd_class
3692 = aarch64_get_operand_class (operand->type);
3693
3694 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3695 if (reg)
3696 {
3697 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3698 {
3699 set_syntax_error (_("unexpected register in the immediate operand"));
3700 return false;
3701 }
3702
3703 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3704 {
3705 set_expected_reg_error (REG_TYPE_R_Z, reg, 0);
3706 return false;
3707 }
3708
3709 operand->reg.regno = reg->number;
3710 operand->qualifier = qualifier;
3711
3712 /* Accept optional shift operation on register. */
3713 if (! skip_past_comma (str))
3714 return true;
3715
3716 if (! parse_shift (str, operand, mode))
3717 return false;
3718
3719 return true;
3720 }
3721 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3722 {
3723 set_syntax_error
3724 (_("integer register expected in the extended/shifted operand "
3725 "register"));
3726 return false;
3727 }
3728
3729 /* We have a shifted immediate variable. */
3730 return parse_shifter_operand_imm (str, operand, mode);
3731 }
3732
3733 /* Return TRUE on success; return FALSE otherwise. */
3734
3735 static bool
3736 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3737 enum parse_shift_mode mode)
3738 {
3739 char *p = *str;
3740
3741 /* Determine if we have the sequence of characters #: or just :
3742 coming next. If we do, then we check for a :rello: relocation
3743 modifier. If we don't, punt the whole lot to
3744 parse_shifter_operand. */
3745
3746 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3747 {
3748 struct reloc_table_entry *entry;
3749
3750 if (p[0] == '#')
3751 p += 2;
3752 else
3753 p++;
3754 *str = p;
3755
3756 /* Try to parse a relocation. Anything else is an error. */
3757 if (!(entry = find_reloc_table_entry (str)))
3758 {
3759 set_syntax_error (_("unknown relocation modifier"));
3760 return false;
3761 }
3762
3763 if (entry->add_type == 0)
3764 {
3765 set_syntax_error
3766 (_("this relocation modifier is not allowed on this instruction"));
3767 return false;
3768 }
3769
3770 /* Save str before we decompose it. */
3771 p = *str;
3772
3773 /* Next, we parse the expression. */
3774 if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
3775 REJECT_ABSENT))
3776 return false;
3777
3778 /* Record the relocation type (use the ADD variant here). */
3779 inst.reloc.type = entry->add_type;
3780 inst.reloc.pc_rel = entry->pc_rel;
3781
3782 /* If str is empty, we've reached the end, stop here. */
3783 if (**str == '\0')
3784 return true;
3785
3786 /* Otherwise, we have a shifted reloc modifier, so rewind to
3787 recover the variable name and continue parsing for the shifter. */
3788 *str = p;
3789 return parse_shifter_operand_imm (str, operand, mode);
3790 }
3791
3792 return parse_shifter_operand (str, operand, mode);
3793 }
3794
/* Parse all forms of an address expression.  Information is written
   to *OPERAND and/or inst.reloc.

   The A64 instruction set has the following addressing modes:

   Offset
     [base]			// in SIMD ld/st structure
     [base{,#0}]		// in ld/st exclusive
     [base{,#imm}]
     [base,Xm{,LSL #imm}]
     [base,Xm,SXTX {#imm}]
     [base,Wm,(S|U)XTW {#imm}]
   Pre-indexed
     [base]!			// in ldraa/ldrab exclusive
     [base,#imm]!
   Post-indexed
     [base],#imm
     [base],Xm			// in SIMD ld/st structure
   PC-relative (literal)
     label
   SVE:
     [base,#imm,MUL VL]
     [base,Zm.D{,LSL #imm}]
     [base,Zm.S,(S|U)XTW {#imm}]
     [base,Zm.D,(S|U)XTW {#imm}]	// ignores top 32 bits of Zm.D elements
     [Zn.S,#imm]
     [Zn.D,#imm]
     [Zn.S{, Xm}]
     [Zn.S,Zm.S{,LSL #imm}]	// in ADR
     [Zn.D,Zm.D{,LSL #imm}]	// in ADR
     [Zn.D,Zm.D,(S|U)XTW {#imm}]	// in ADR

   (As a convenience, the notation "=immediate" is permitted in conjunction
   with the pc-relative literal load instructions to automatically place an
   immediate value or symbolic address in a nearby literal pool and generate
   a hidden label which references it.)

   Upon a successful parsing, the address structure in *OPERAND will be
   filled in the following way:

     .base_regno = <base>
     .offset.is_reg	// 1 if the offset is a register
     .offset.imm = <imm>
     .offset.regno = <Rm>

   For different addressing modes defined in the A64 ISA:

   Offset
     .pcrel=0; .preind=1; .postind=0; .writeback=0
   Pre-indexed
     .pcrel=0; .preind=1; .postind=0; .writeback=1
   Post-indexed
     .pcrel=0; .preind=0; .postind=1; .writeback=1
   PC-relative (literal)
     .pcrel=1; .preind=1; .postind=0; .writeback=0

   The shift/extension information, if any, will be stored in .shifter.
   The base and offset qualifiers will be stored in *BASE_QUALIFIER and
   *OFFSET_QUALIFIER respectively, with NIL being used if there's no
   corresponding register.

   BASE_TYPE says which types of base register should be accepted and
   OFFSET_TYPE says the same for offset registers.  IMM_SHIFT_MODE
   is the type of shifter that is allowed for immediate offsets,
   or SHIFTED_NONE if none.

   In all other respects, it is the caller's responsibility to check
   for addressing modes not supported by the instruction, and to set
   inst.reloc.type.  */

static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* The relocation variant depends on the consuming
	     instruction: ADR uses the adr variant, everything else
	     here is a literal load.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Remember whether the base even looked like a register name, so
     the diagnostic below can distinguish a bad name from a missing
     one.  */
  bool alpha_base_p = ISALPHA (*p);
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      if (reg
	  && aarch64_check_reg_type (reg, REG_TYPE_R_SP)
	  && *base_qualifier == AARCH64_OPND_QLF_W)
	set_syntax_error (_("expected a 64-bit base register"));
      else if (alpha_base_p)
	set_syntax_error (_("invalid base register"));
      else
	set_syntax_error (_("expected a base register"));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}] */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These (non-extending) forms require a 64-bit offset,
		 matching the base size, except for the SVE2
		 vector-plus-scalar [Zn.S, Xm] case.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* Extending (S|U)XTW forms require a 32-bit offset.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is shorthand for [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
4171
4172 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4173 on success. */
4174 static bool
4175 parse_address (char **str, aarch64_opnd_info *operand)
4176 {
4177 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4178 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4179 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
4180 }
4181
4182 /* Parse an address in which SVE vector registers and MUL VL are allowed.
4183 The arguments have the same meaning as for parse_address_main.
4184 Return TRUE on success. */
4185 static bool
4186 parse_sve_address (char **str, aarch64_opnd_info *operand,
4187 aarch64_opnd_qualifier_t *base_qualifier,
4188 aarch64_opnd_qualifier_t *offset_qualifier)
4189 {
4190 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
4191 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
4192 SHIFTED_MUL_VL);
4193 }
4194
4195 /* Parse a register X0-X30. The register must be 64-bit and register 31
4196 is unallocated. */
4197 static bool
4198 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4199 {
4200 const reg_entry *reg = parse_reg (str);
4201 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4202 {
4203 set_expected_reg_error (REG_TYPE_R_64, reg, 0);
4204 return false;
4205 }
4206 operand->reg.regno = reg->number;
4207 operand->qualifier = AARCH64_OPND_QLF_X;
4208 return true;
4209 }
4210
4211 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
4212 Return TRUE on success; otherwise return FALSE. */
4213 static bool
4214 parse_half (char **str, int *internal_fixup_p)
4215 {
4216 char *p = *str;
4217
4218 skip_past_char (&p, '#');
4219
4220 gas_assert (internal_fixup_p);
4221 *internal_fixup_p = 0;
4222
4223 if (*p == ':')
4224 {
4225 struct reloc_table_entry *entry;
4226
4227 /* Try to parse a relocation. Anything else is an error. */
4228 ++p;
4229
4230 if (!(entry = find_reloc_table_entry (&p)))
4231 {
4232 set_syntax_error (_("unknown relocation modifier"));
4233 return false;
4234 }
4235
4236 if (entry->movw_type == 0)
4237 {
4238 set_syntax_error
4239 (_("this relocation modifier is not allowed on this instruction"));
4240 return false;
4241 }
4242
4243 inst.reloc.type = entry->movw_type;
4244 }
4245 else
4246 *internal_fixup_p = 1;
4247
4248 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4249 return false;
4250
4251 *str = p;
4252 return true;
4253 }
4254
4255 /* Parse an operand for an ADRP instruction:
4256 ADRP <Xd>, <label>
4257 Return TRUE on success; otherwise return FALSE. */
4258
4259 static bool
4260 parse_adrp (char **str)
4261 {
4262 char *p;
4263
4264 p = *str;
4265 if (*p == ':')
4266 {
4267 struct reloc_table_entry *entry;
4268
4269 /* Try to parse a relocation. Anything else is an error. */
4270 ++p;
4271 if (!(entry = find_reloc_table_entry (&p)))
4272 {
4273 set_syntax_error (_("unknown relocation modifier"));
4274 return false;
4275 }
4276
4277 if (entry->adrp_type == 0)
4278 {
4279 set_syntax_error
4280 (_("this relocation modifier is not allowed on this instruction"));
4281 return false;
4282 }
4283
4284 inst.reloc.type = entry->adrp_type;
4285 }
4286 else
4287 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4288
4289 inst.reloc.pc_rel = 1;
4290 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4291 return false;
4292 *str = p;
4293 return true;
4294 }
4295
4296 /* Miscellaneous. */
4297
4298 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4299 of SIZE tokens in which index I gives the token for field value I,
4300 or is null if field value I is invalid. REG_TYPE says which register
4301 names should be treated as registers rather than as symbolic immediates.
4302
4303 Return true on success, moving *STR past the operand and storing the
4304 field value in *VAL. */
4305
4306 static int
4307 parse_enum_string (char **str, int64_t *val, const char *const *array,
4308 size_t size, aarch64_reg_type reg_type)
4309 {
4310 expressionS exp;
4311 char *p, *q;
4312 size_t i;
4313
4314 /* Match C-like tokens. */
4315 p = q = *str;
4316 while (ISALNUM (*q))
4317 q++;
4318
4319 for (i = 0; i < size; ++i)
4320 if (array[i]
4321 && strncasecmp (array[i], p, q - p) == 0
4322 && array[i][q - p] == 0)
4323 {
4324 *val = i;
4325 *str = q;
4326 return true;
4327 }
4328
4329 if (!parse_immediate_expression (&p, &exp, reg_type))
4330 return false;
4331
4332 if (exp.X_op == O_constant
4333 && (uint64_t) exp.X_add_number < size)
4334 {
4335 *val = exp.X_add_number;
4336 *str = p;
4337 return true;
4338 }
4339
4340 /* Use the default error for this operand. */
4341 return false;
4342 }
4343
4344 /* Parse an option for a preload instruction. Returns the encoding for the
4345 option, or PARSE_FAIL. */
4346
4347 static int
4348 parse_pldop (char **str)
4349 {
4350 char *p, *q;
4351 const struct aarch64_name_value_pair *o;
4352
4353 p = q = *str;
4354 while (ISALNUM (*q))
4355 q++;
4356
4357 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4358 if (!o)
4359 return PARSE_FAIL;
4360
4361 *str = q;
4362 return o->value;
4363 }
4364
4365 /* Parse an option for a barrier instruction. Returns the encoding for the
4366 option, or PARSE_FAIL. */
4367
4368 static int
4369 parse_barrier (char **str)
4370 {
4371 char *p, *q;
4372 const struct aarch64_name_value_pair *o;
4373
4374 p = q = *str;
4375 while (ISALPHA (*q))
4376 q++;
4377
4378 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4379 if (!o)
4380 return PARSE_FAIL;
4381
4382 *str = q;
4383 return o->value;
4384 }
4385
4386 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4387 return 0 if successful. Otherwise return PARSE_FAIL. */
4388
4389 static int
4390 parse_barrier_psb (char **str,
4391 const struct aarch64_name_value_pair ** hint_opt)
4392 {
4393 char *p, *q;
4394 const struct aarch64_name_value_pair *o;
4395
4396 p = q = *str;
4397 while (ISALPHA (*q))
4398 q++;
4399
4400 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4401 if (!o)
4402 {
4403 set_fatal_syntax_error
4404 ( _("unknown or missing option to PSB/TSB"));
4405 return PARSE_FAIL;
4406 }
4407
4408 if (o->value != 0x11)
4409 {
4410 /* PSB only accepts option name 'CSYNC'. */
4411 set_syntax_error
4412 (_("the specified option is not accepted for PSB/TSB"));
4413 return PARSE_FAIL;
4414 }
4415
4416 *str = q;
4417 *hint_opt = o;
4418 return 0;
4419 }
4420
4421 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4422 return 0 if successful. Otherwise return PARSE_FAIL. */
4423
4424 static int
4425 parse_bti_operand (char **str,
4426 const struct aarch64_name_value_pair ** hint_opt)
4427 {
4428 char *p, *q;
4429 const struct aarch64_name_value_pair *o;
4430
4431 p = q = *str;
4432 while (ISALPHA (*q))
4433 q++;
4434
4435 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4436 if (!o)
4437 {
4438 set_fatal_syntax_error
4439 ( _("unknown option to BTI"));
4440 return PARSE_FAIL;
4441 }
4442
4443 switch (o->value)
4444 {
4445 /* Valid BTI operands. */
4446 case HINT_OPD_C:
4447 case HINT_OPD_J:
4448 case HINT_OPD_JC:
4449 break;
4450
4451 default:
4452 set_syntax_error
4453 (_("unknown option to BTI"));
4454 return PARSE_FAIL;
4455 }
4456
4457 *str = q;
4458 *hint_opt = o;
4459 return 0;
4460 }
4461
4462 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4463 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4464 on failure. Format:
4465
4466 REG_TYPE.QUALIFIER
4467
4468 Side effect: Update STR with current parse position of success.
4469
4470 FLAGS is as for parse_typed_reg. */
4471
4472 static const reg_entry *
4473 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4474 aarch64_opnd_qualifier_t *qualifier, unsigned int flags)
4475 {
4476 struct vector_type_el vectype;
4477 const reg_entry *reg = parse_typed_reg (str, reg_type, &vectype,
4478 PTR_FULL_REG | flags);
4479 if (!reg)
4480 return NULL;
4481
4482 if (vectype.type == NT_invtype)
4483 *qualifier = AARCH64_OPND_QLF_NIL;
4484 else
4485 {
4486 *qualifier = vectype_to_qualifier (&vectype);
4487 if (*qualifier == AARCH64_OPND_QLF_NIL)
4488 return NULL;
4489 }
4490
4491 return reg;
4492 }
4493
4494 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4495
4496 #<imm>
4497 <imm>
4498
4499 Function return TRUE if immediate was found, or FALSE.
4500 */
4501 static bool
4502 parse_sme_immediate (char **str, int64_t *imm)
4503 {
4504 int64_t val;
4505 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4506 return false;
4507
4508 *imm = val;
4509 return true;
4510 }
4511
4512 /* Parse index with selection register and immediate offset:
4513
4514 [<Wv>, <imm>]
4515 [<Wv>, #<imm>]
4516
4517 Return true on success, populating OPND with the parsed index. */
4518
4519 static bool
4520 parse_sme_za_index (char **str, struct aarch64_indexed_za *opnd)
4521 {
4522 const reg_entry *reg;
4523
4524 if (!skip_past_char (str, '['))
4525 {
4526 set_syntax_error (_("expected '['"));
4527 return false;
4528 }
4529
4530 /* The selection register, encoded in the 2-bit Rv field. */
4531 reg = parse_reg (str);
4532 if (reg == NULL || reg->type != REG_TYPE_R_32)
4533 {
4534 set_syntax_error (_("expected a 32-bit selection register"));
4535 return false;
4536 }
4537 opnd->index.regno = reg->number;
4538
4539 if (!skip_past_char (str, ','))
4540 {
4541 set_syntax_error (_("missing immediate offset"));
4542 return false;
4543 }
4544
4545 if (!parse_sme_immediate (str, &opnd->index.imm))
4546 {
4547 set_syntax_error (_("expected a constant immediate offset"));
4548 return false;
4549 }
4550
4551 if (!skip_past_char (str, ']'))
4552 {
4553 set_syntax_error (_("expected ']'"));
4554 return false;
4555 }
4556
4557 return true;
4558 }
4559
4560 /* Parse a register of type REG_TYPE that might have an element type
4561 qualifier and that is indexed by two values: a 32-bit register,
4562 followed by an immediate. The ranges of the register and the
4563 immediate vary by opcode and are checked in libopcodes.
4564
4565 Return true on success, populating OPND with information about
4566 the operand and setting QUALIFIER to the register qualifier.
4567
4568 Field format examples:
4569
4570 <Pm>.<T>[<Wv>< #<imm>]
4571 ZA[<Wv>, #<imm>]
4572 <ZAn><HV>.<T>[<Wv>, #<imm>]
4573
4574 FLAGS is as for parse_typed_reg. */
4575
4576 static bool
4577 parse_dual_indexed_reg (char **str, aarch64_reg_type reg_type,
4578 struct aarch64_indexed_za *opnd,
4579 aarch64_opnd_qualifier_t *qualifier,
4580 unsigned int flags)
4581 {
4582 const reg_entry *reg = parse_reg_with_qual (str, reg_type, qualifier, flags);
4583 if (!reg)
4584 return false;
4585
4586 opnd->v = aarch64_check_reg_type (reg, REG_TYPE_ZATV);
4587 opnd->regno = reg->number;
4588
4589 return parse_sme_za_index (str, opnd);
4590 }
4591
4592 /* Like parse_sme_za_hv_tiles_operand, but expect braces around the
4593 operand. */
4594
4595 static bool
4596 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4597 struct aarch64_indexed_za *opnd,
4598 aarch64_opnd_qualifier_t *qualifier)
4599 {
4600 if (!skip_past_char (str, '{'))
4601 {
4602 set_expected_reglist_error (REG_TYPE_ZATHV, parse_reg (str));
4603 return false;
4604 }
4605
4606 if (!parse_dual_indexed_reg (str, REG_TYPE_ZATHV, opnd, qualifier,
4607 PTR_IN_REGLIST))
4608 return false;
4609
4610 if (!skip_past_char (str, '}'))
4611 {
4612 set_syntax_error (_("expected '}'"));
4613 return false;
4614 }
4615
4616 return true;
4617 }
4618
4619 /* Parse list of up to eight 64-bit element tile names separated by commas in
4620 SME's ZERO instruction:
4621
4622 ZERO { <mask> }
4623
4624 Function returns <mask>:
4625
4626 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4627 */
4628 static int
4629 parse_sme_zero_mask(char **str)
4630 {
4631 char *q;
4632 int mask;
4633 aarch64_opnd_qualifier_t qualifier;
4634 unsigned int ptr_flags = PTR_IN_REGLIST;
4635
4636 mask = 0x00;
4637 q = *str;
4638 do
4639 {
4640 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA_ZAT,
4641 &qualifier, ptr_flags);
4642 if (!reg)
4643 return PARSE_FAIL;
4644
4645 if (reg->type == REG_TYPE_ZA)
4646 {
4647 if (qualifier != AARCH64_OPND_QLF_NIL)
4648 {
4649 set_syntax_error ("ZA should not have a size suffix");
4650 return PARSE_FAIL;
4651 }
4652 /* { ZA } is assembled as all-ones immediate. */
4653 mask = 0xff;
4654 }
4655 else
4656 {
4657 int regno = reg->number;
4658 if (qualifier == AARCH64_OPND_QLF_S_B)
4659 {
4660 /* { ZA0.B } is assembled as all-ones immediate. */
4661 mask = 0xff;
4662 }
4663 else if (qualifier == AARCH64_OPND_QLF_S_H)
4664 mask |= 0x55 << regno;
4665 else if (qualifier == AARCH64_OPND_QLF_S_S)
4666 mask |= 0x11 << regno;
4667 else if (qualifier == AARCH64_OPND_QLF_S_D)
4668 mask |= 0x01 << regno;
4669 else if (qualifier == AARCH64_OPND_QLF_S_Q)
4670 {
4671 set_syntax_error (_("ZA tile masks do not operate at .Q"
4672 " granularity"));
4673 return PARSE_FAIL;
4674 }
4675 else if (qualifier == AARCH64_OPND_QLF_NIL)
4676 {
4677 set_syntax_error (_("missing ZA tile size"));
4678 return PARSE_FAIL;
4679 }
4680 else
4681 {
4682 set_syntax_error (_("invalid ZA tile"));
4683 return PARSE_FAIL;
4684 }
4685 }
4686 ptr_flags |= PTR_GOOD_MATCH;
4687 }
4688 while (skip_past_char (&q, ','));
4689
4690 *str = q;
4691 return mask;
4692 }
4693
4694 /* Wraps in curly braces <mask> operand ZERO instruction:
4695
4696 ZERO { <mask> }
4697
4698 Function returns value of <mask> bit-field.
4699 */
4700 static int
4701 parse_sme_list_of_64bit_tiles (char **str)
4702 {
4703 int regno;
4704
4705 if (!skip_past_char (str, '{'))
4706 {
4707 set_syntax_error (_("expected '{'"));
4708 return PARSE_FAIL;
4709 }
4710
4711 /* Empty <mask> list is an all-zeros immediate. */
4712 if (!skip_past_char (str, '}'))
4713 {
4714 regno = parse_sme_zero_mask (str);
4715 if (regno == PARSE_FAIL)
4716 return PARSE_FAIL;
4717
4718 if (!skip_past_char (str, '}'))
4719 {
4720 set_syntax_error (_("expected '}'"));
4721 return PARSE_FAIL;
4722 }
4723 }
4724 else
4725 regno = 0x00;
4726
4727 return regno;
4728 }
4729
/* Parse streaming mode operand for SMSTART and SMSTOP.

   {SM | ZA}

   Function returns 's' if SM or 'z' if ZA is parsed (always lower case,
   regardless of the input spelling).  Otherwise PARSE_FAIL.
*/
static int
parse_sme_sm_za (char **str)
{
  char *p, *q;

  /* Scan the alphabetic operand name.  */
  p = q = *str;
  while (ISALPHA (*q))
    q++;

  if ((q - p != 2)
      || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
    {
      set_syntax_error (_("expected SM or ZA operand"));
      return PARSE_FAIL;
    }

  *str = q;
  /* Distinguish the two options by their first letter.  */
  return TOLOWER (p[0]);
}
4755
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-null, *FLAGS receives the register's flag bits (0 for
   an implementation-defined register).
*/

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy the lower-cased name into BUF, never writing past the space
     reserved for the terminating NUL.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the five fields into the 16-bit encoding used by MSR/MRS.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* A recognized but unsupported or deprecated name still parses;
	 we only diagnose.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4829
/* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
   for the option, or NULL.  */

static const aarch64_sys_ins_reg *
parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_ins_reg *o;

  /* Copy the lower-cased name into BUF, never writing past the space
     reserved for the terminating NUL.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return NULL;

  o = str_hash_find (sys_ins_regs, buf);
  if (!o)
    return NULL;

  /* A recognized but unsupported or deprecated name still parses;
     we only diagnose.  */
  if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
					o->name, o->value, o->flags, 0))
    as_bad (_("selected processor does not support system register "
	      "name '%s'"), buf);
  if (aarch64_sys_reg_deprecated_p (o->flags))
    as_warn (_("system register name '%s' is deprecated and may be "
	       "removed in a future release"), buf);

  *str = q;
  return o;
}
4867 \f
/* Parsing helper macros.  Each assumes a local `char *str' cursor and a
   `failure' label in the enclosing function, and jumps to `failure' on a
   parse error.  */

/* Consume the literal character CHR or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of REGTYPE into the local `reg' or fail.  */
#define po_reg_or_fail(regtype) do {				\
    reg = aarch64_reg_parse (&str, regtype, NULL);		\
    if (!reg)							\
      goto failure;						\
  } while (0)

/* Parse an integer or FP register of REG_TYPE, recording its number
   and inherent qualifier in the local `info', or fail.  */
#define po_int_fp_reg_or_fail(reg_type) do {			\
    reg = parse_reg (&str);					\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_expected_reg_error (reg_type, reg, 0);		\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = inherent_reg_qualifier (reg);		\
  } while (0)

/* Parse a constant immediate into the local `val' (no range check).  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into `val'; fail unless MIN <= val <= MAX.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse an enumeration string from ARRAY into `val' or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR and fail if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4916 \f
/* Encode the 12-bit immediate field of an add/sub (immediate)
   instruction; the field occupies bits [21:10].  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  uint32_t encoded = imm << 10;
  return encoded;
}
4923
/* Encode the shift-amount field of an add/sub (immediate)
   instruction; the field starts at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  uint32_t encoded = cnt << 22;
  return encoded;
}
4930
4931
/* Encode the immediate field of an ADR instruction, which is split
   into a low and a high part.  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;			/* [1:0] -> [30:29].  */
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;		/* [20:2] -> [23:5].  */
  return immlo | immhi;
}
4939
/* Encode the 16-bit immediate field of a move-wide immediate
   instruction; the field starts at bit 5.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  uint32_t encoded = imm << 5;
  return encoded;
}
4946
/* Encode the 26-bit offset of an unconditional branch; only the low
   26 bits of OFS are kept.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t mask26 = (1u << 26) - 1;
  return ofs & mask26;
}
4953
/* Encode the 19-bit offset of a conditional branch or compare-and-branch;
   the field occupies bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t mask19 = (1u << 19) - 1;
  return (ofs & mask19) << 5;
}
4960
/* Encode the 19-bit offset of a load-literal instruction; the field
   occupies bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t mask19 = (1u << 19) - 1;
  return (ofs & mask19) << 5;
}
4967
/* Encode the 14-bit offset of a test-and-branch instruction; the field
   occupies bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t mask14 = (1u << 14) - 1;
  return (ofs & mask14) << 5;
}
4974
/* Encode the 16-bit immediate field of SVC/HVC/SMC; the field starts
   at bit 5.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  uint32_t encoded = imm << 5;
  return encoded;
}
4981
/* Re-encode add(s) to sub(s), or sub(s) to add(s), by toggling the
   op bit (bit 30).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
4988
/* Re-encode a MOVN/MOVZ opcode as MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode | movz_bit;
}
4994
/* Re-encode a MOVN/MOVZ opcode as MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode & ~movz_bit;
}
5000
5001 /* Overall per-instruction processing. */
5002
5003 /* We need to be able to fix up arbitrary expressions in some statements.
5004 This is so that we can handle symbols that are an arbitrary distance from
5005 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
5006 which returns part of an address in a form which will be valid for
5007 a data instruction. We do this by pushing the expression into a symbol
5008 in the expr_section, and creating a fix for that. */
5009
5010 static fixS *
5011 fix_new_aarch64 (fragS * frag,
5012 int where,
5013 short int size,
5014 expressionS * exp,
5015 int pc_rel,
5016 int reloc)
5017 {
5018 fixS *new_fix;
5019
5020 switch (exp->X_op)
5021 {
5022 case O_constant:
5023 case O_symbol:
5024 case O_add:
5025 case O_subtract:
5026 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5027 break;
5028
5029 default:
5030 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5031 pc_rel, reloc);
5032 break;
5033 }
5034 return new_fix;
5035 }
5036 \f
5037 /* Diagnostics on operands errors. */
5038
5039 /* By default, output verbose error message.
5040 Disable the verbose error message by -mno-verbose-error. */
5041 static int verbose_error_p = 1;
5042
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Indexed by the values
   of enum aarch64_operand_error_kind; keep the entries in the same order
   as that enum.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_OTHER_ERROR",
  "AARCH64_OPDE_INVALID_REGNO",
};
#endif /* DEBUG_AARCH64 */
5063
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enum being declared in increasing
     order of severity; assert the expected ordering.
     NOTE(review): AARCH64_OPDE_OTHER_ERROR is asserted greater than
     AARCH64_OPDE_REG_LIST rather than AARCH64_OPDE_UNALIGNED -- confirm
     whether that weaker check is intentional.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_REG_LIST);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  gas_assert (AARCH64_OPDE_INVALID_REGNO > AARCH64_OPDE_OTHER_ERROR);
  return lhs > rhs;
}
5088
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  Returns a pointer to a static
   buffer that is overwritten on each call.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5117
/* Reset *INSTRUCTION to a clean state, ready for parsing a new assembly
   line: all fields zeroed and the relocation type marked as unused.  */
static void
reset_aarch64_instruction (aarch64_instruction *instruction)
{
  memset (instruction, '\0', sizeof (aarch64_instruction));
  instruction->reloc.type = BFD_RELOC_UNUSED;
}
5124
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode the error applies to.  */
  aarch64_operand_error detail;		/* Description of the error.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of error records; new records are inserted at the
   head (see add_operand_error_record).  */
struct operand_errors
{
  operand_error_record *head;		/* Most recently inserted record.  */
  operand_error_record *tail;		/* Oldest record.  */
};

typedef struct operand_errors operand_errors;
5144
5145 /* Top-level data structure reporting user errors for the current line of
5146 the assembly code.
5147 The way md_assemble works is that all opcodes sharing the same mnemonic
5148 name are iterated to find a match to the assembly line. In this data
5149 structure, each of the such opcodes will have one operand_error_record
5150 allocated and inserted. In other words, excessive errors related with
5151 a single opcode are disregarded. */
5152 operand_errors operand_error_report;
5153
5154 /* Free record nodes. */
5155 static operand_error_record *free_opnd_error_record_nodes = NULL;
5156
5157 /* Initialize the data structure that stores the operand mismatch
5158 information on assembling one line of the assembly code. */
5159 static void
5160 init_operand_error_report (void)
5161 {
5162 if (operand_error_report.head != NULL)
5163 {
5164 gas_assert (operand_error_report.tail != NULL);
5165 operand_error_report.tail->next = free_opnd_error_record_nodes;
5166 free_opnd_error_record_nodes = operand_error_report.head;
5167 operand_error_report.head = NULL;
5168 operand_error_report.tail = NULL;
5169 return;
5170 }
5171 gas_assert (operand_error_report.tail == NULL);
5172 }
5173
5174 /* Return TRUE if some operand error has been recorded during the
5175 parsing of the current assembly line using the opcode *OPCODE;
5176 otherwise return FALSE. */
5177 static inline bool
5178 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5179 {
5180 operand_error_record *record = operand_error_report.head;
5181 return record && record->opcode == opcode;
5182 }
5183
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Reuse a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Overwrite (or initialize) the head record's error details.  */
  record->detail = new_record->detail;
}
5235
5236 static inline void
5237 record_operand_error_info (const aarch64_opcode *opcode,
5238 aarch64_operand_error *error_info)
5239 {
5240 operand_error_record record;
5241 record.opcode = opcode;
5242 record.detail = *error_info;
5243 add_operand_error_record (&record);
5244 }
5245
5246 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5247 error message *ERROR, for operand IDX (count from 0). */
5248
5249 static void
5250 record_operand_error (const aarch64_opcode *opcode, int idx,
5251 enum aarch64_operand_error_kind kind,
5252 const char* error)
5253 {
5254 aarch64_operand_error info;
5255 memset(&info, 0, sizeof (info));
5256 info.index = idx;
5257 info.kind = kind;
5258 info.error = error;
5259 info.non_fatal = false;
5260 record_operand_error_info (opcode, &info);
5261 }
5262
5263 static void
5264 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5265 enum aarch64_operand_error_kind kind,
5266 const char* error, const int *extra_data)
5267 {
5268 aarch64_operand_error info;
5269 info.index = idx;
5270 info.kind = kind;
5271 info.error = error;
5272 info.data[0].i = extra_data[0];
5273 info.data[1].i = extra_data[1];
5274 info.data[2].i = extra_data[2];
5275 info.non_fatal = false;
5276 record_operand_error_info (opcode, &info);
5277 }
5278
5279 static void
5280 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5281 const char* error, int lower_bound,
5282 int upper_bound)
5283 {
5284 int data[3] = {lower_bound, upper_bound, 0};
5285 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5286 error, data);
5287 }
5288
5289 /* Remove the operand error record for *OPCODE. */
5290 static void ATTRIBUTE_UNUSED
5291 remove_operand_error_record (const aarch64_opcode *opcode)
5292 {
5293 if (opcode_has_operand_error_p (opcode))
5294 {
5295 operand_error_record* record = operand_error_report.head;
5296 gas_assert (record != NULL && operand_error_report.tail != NULL);
5297 operand_error_report.head = record->next;
5298 record->next = free_opnd_error_record_nodes;
5299 free_opnd_error_record_nodes = record;
5300 if (operand_error_report.head == NULL)
5301 {
5302 gas_assert (operand_error_report.tail == record);
5303 operand_error_report.tail = NULL;
5304 }
5305 }
5306 }
5307
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  Note that when no sequence
   matches any operand, index 0 is still returned.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.
	 An empty sequence marks the end of the populated entries.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers agree with this sequence.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5357
5358 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5359 corresponding operands in *INSTR. */
5360
5361 static inline void
5362 assign_qualifier_sequence (aarch64_inst *instr,
5363 const aarch64_opnd_qualifier_t *qualifiers)
5364 {
5365 int i = 0;
5366 int num_opnds = aarch64_num_of_operands (instr->opcode);
5367 gas_assert (num_opnds);
5368 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5369 instr->operands[i].qualifier = *qualifiers;
5370 }
5371
/* Callback used by aarch64_print_operand to apply STYLE to the
   disassembler output created from FMT and ARGS.  The STYLER object holds
   any required state.  Must return a pointer to a string (created from FMT
   and ARGS) that will continue to be valid until the complete disassembled
   instruction has been printed.

   We don't currently add any styling to the output of the disassembler as
   used within assembler error messages, and so STYLE is ignored here.  A
   new string is allocated on the obstack help within STYLER and returned
   to the caller.  */

static const char *aarch64_apply_style
	(struct aarch64_styler *styler,
	 enum disassembler_style style ATTRIBUTE_UNUSED,
	 const char *fmt, va_list args)
{
  int res;
  char *ptr;
  struct obstack *stack = (struct obstack *) styler->state;
  va_list ap;

  /* Calculate the required space.  ARGS must be copied because a va_list
     can only be traversed once; the copy is consumed by the sizing pass
     and the original by the formatting pass below.  */
  va_copy (ap, args);
  res = vsnprintf (NULL, 0, fmt, ap);
  va_end (ap);
  gas_assert (res >= 0);

  /* Allocate space on the obstack and format the result.  */
  ptr = (char *) obstack_alloc (stack, res + 1);
  res = vsnprintf (ptr, (res + 1), fmt, args);
  gas_assert (res >= 0);

  return ptr;
}
5406
/* Print operands for the diagnosis purpose.  Appends a textual rendering
   of OPNDS (interpreted according to OPCODE) to BUF, which must already
   contain a NUL-terminated string and be large enough for the result --
   TODO confirm callers size BUF adequately; the strcat calls here are
   unbounded.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Style callback state: strings are allocated on CONTENT and live until
     the obstack is freed at the end of this function.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  obstack_free (&content, NULL);
}
5458
/* Send an informational message to stderr, prefixed with the current
   file name and line number (when known) in the style of as_warn.  */

static void
output_info (const char *format, ...)
{
  unsigned int lineno;
  const char *filename;
  va_list ap;

  filename = as_where (&lineno);
  if (filename != NULL)
    {
      if (lineno != 0)
	fprintf (stderr, "%s:%u: ", filename, lineno);
      else
	fprintf (stderr, "%s: ", filename);
    }
  fprintf (stderr, _("Info: "));
  va_start (ap, format);
  vfprintf (stderr, format, ap);
  va_end (ap);
  (void) putc ('\n', stderr);
}
5482
/* See if the AARCH64_OPDE_SYNTAX_ERROR error described by DETAIL
   relates to registers or register lists.  If so, return a string that
   reports the error against "operand %d", otherwise return null.

   NOTE(review): this decodes the SEF_* protocol in DETAIL->data:
   data[0].i appears to hold the register types acceptable outside a
   register list, data[1].i those inside (with SEF_IN_REGLIST set), and
   data[2].i the type of register actually found -- confirm against the
   set_expected_reg_error callers.  */

static const char *
get_reg_error_message (const aarch64_operand_error *detail)
{
  /* Handle the case where we found a register that was expected
     to be in a register list outside of a register list.  */
  if ((detail->data[1].i & detail->data[2].i) != 0
      && (detail->data[1].i & SEF_IN_REGLIST) == 0)
    return _("missing braces at operand %d");

  /* If some opcodes expected a register, and we found a register,
     complain about the difference.  */
  if (detail->data[2].i)
    {
      unsigned int expected = (detail->data[1].i & SEF_IN_REGLIST
			       ? detail->data[1].i & ~SEF_IN_REGLIST
			       : detail->data[0].i & ~SEF_DEFAULT_ERROR);
      const char *msg = get_reg_expected_msg (expected, detail->data[2].i);
      if (!msg)
	msg = N_("unexpected register type at operand %d");
      return msg;
    }

  /* Handle the case where we got to the point of trying to parse a
     register within a register list, but didn't find a known register.  */
  if (detail->data[1].i & SEF_IN_REGLIST)
    {
      unsigned int expected = detail->data[1].i & ~SEF_IN_REGLIST;
      const char *msg = get_reg_expected_msg (expected, 0);
      if (!msg)
	msg = _("invalid register list at operand %d");
      return msg;
    }

  /* Punt if register-related problems weren't the only errors.  */
  if (detail->data[0].i & SEF_DEFAULT_ERROR)
    return NULL;

  /* Handle the case where the only acceptable things are registers.  */
  if (detail->data[1].i == 0)
    {
      const char *msg = get_reg_expected_msg (detail->data[0].i, 0);
      if (!msg)
	msg = _("expected a register at operand %d");
      return msg;
    }

  /* Handle the case where the only acceptable things are register lists,
     and there was no opening '{'.  */
  if (detail->data[0].i == 0)
    return _("expected '{' at operand %d");

  return _("expected a register or register list at operand %d");
}
5540
/* Output one operand error record.

   RECORD holds a single problem found while matching the assembly
   line STR against RECORD->opcode.  Non-fatal problems are reported
   with as_warn, everything else with as_bad.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  /* Index of the faulting operand; negative when the error is not
     tied to a particular operand.  */
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;
  /* Prepared message text, if the back end supplied one.  */
  const char *msg = detail->error;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
      /* Try to synthesize a register-specific message from the
	 accumulated data[] flags before falling back to the generic
	 report shared with the cases below.  */
      if (!msg && idx >= 0)
	{
	  msg = get_reg_error_message (detail);
	  if (msg)
	    {
	      /* MSG contains an "operand %d" placeholder; expand it
		 with the 1-based operand number.  */
	      char *full_msg = xasprintf (msg, idx + 1);
	      handler (_("%s -- `%s'"), full_msg, str);
	      free (full_msg);
	      break;
	    }
	}
      /* Fall through.  */

    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (msg != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), msg, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     msg, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate. Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report. Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  /* Encoding is expected to fail here: we reached this point
	     because no qualifier variant matched.  */
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_INVALID_REGNO:
      /* data[0].s is the register prefix; data[1]/data[2] give the
	 valid register-number range.  */
      handler (_("%s%d-%s%d expected at operand %d -- `%s'"),
	       detail->data[0].s, detail->data[1].i,
	       detail->data[0].s, detail->data[2].i, idx + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* An equal lower and upper bound means only one value is
	 acceptable; word the message accordingly.  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 msg ? msg : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 msg ? msg : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0].i == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5756
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      if (curr->detail.kind == AARCH64_OPDE_SYNTAX_ERROR)
	{
	  /* Syntax errors carry register-type flag words in data[];
	     trace them to aid debugging of the merging below.  */
	  DEBUG_TRACE ("\t%s [%x, %x, %x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i, curr->detail.data[1].i,
		       curr->detail.data[2].i);
	}
      else
	{
	  DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
	}
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (!record || curr->detail.index > record->detail.index)
	record = curr;
      else if (curr->detail.index == record->detail.index
	       && !record->detail.error)
	{
	  if (curr->detail.error)
	    record = curr;
	  else if (kind == AARCH64_OPDE_SYNTAX_ERROR)
	    {
	      /* Merge the register-type flags from equally-ranked
		 syntax errors so that get_reg_error_message sees the
		 union of what all the templates would accept.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      record->detail.data[1].i |= curr->detail.data[1].i;
	      record->detail.data[2].i |= curr->detail.data[2].i;
	      DEBUG_TRACE ("\t--> %s [%x, %x, %x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i, curr->detail.data[1].i,
			   curr->detail.data[2].i);
	    }
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (record);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5871 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  unsigned int i;

  /* Emit the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (i * 8)) & 0xff;
}
5882
/* Read a little-endian AARCH64 instruction from BUF.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  const unsigned char *p = (const unsigned char *) buf;
  uint32_t insn = p[0];

  insn |= (uint32_t) p[1] << 8;
  insn |= (uint32_t) p[2] << 16;
  insn |= (uint32_t) p[3] << 24;
  return insn;
}
5892
/* Emit the encoded instruction in inst.base.value into the output
   frag and record any pending relocation against it.  NEW_INST, if
   non-NULL, is attached to the fix-up so the instruction can be
   re-encoded when the fix is applied.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve 4 bytes in the current frag for the instruction.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups also carry the operand kind and flags so
	     md_apply_fix can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
5926
/* Link together opcodes of the same name.  */

struct templates
{
  const aarch64_opcode *opcode;	/* One candidate opcode entry.  */
  struct templates *next;	/* Next entry sharing the mnemonic.  */
};

typedef struct templates templates;
5936
5937 static templates *
5938 lookup_mnemonic (const char *start, int len)
5939 {
5940 templates *templ = NULL;
5941
5942 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5943 return templ;
5944 }
5945
5946 /* Subroutine of md_assemble, responsible for looking up the primary
5947 opcode from the mnemonic the user wrote. BASE points to the beginning
5948 of the mnemonic, DOT points to the first '.' within the mnemonic
5949 (if any) and END points to the end of the mnemonic. */
5950
5951 static templates *
5952 opcode_lookup (char *base, char *dot, char *end)
5953 {
5954 const aarch64_cond *cond;
5955 char condname[16];
5956 int len;
5957
5958 if (dot == end)
5959 return 0;
5960
5961 inst.cond = COND_ALWAYS;
5962
5963 /* Handle a possible condition. */
5964 if (dot)
5965 {
5966 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5967 if (!cond)
5968 return 0;
5969 inst.cond = cond->value;
5970 len = dot - base;
5971 }
5972 else
5973 len = end - base;
5974
5975 if (inst.cond == COND_ALWAYS)
5976 {
5977 /* Look for unaffixed mnemonic. */
5978 return lookup_mnemonic (base, len);
5979 }
5980 else if (len <= 13)
5981 {
5982 /* append ".c" to mnemonic if conditional */
5983 memcpy (condname, base, len);
5984 memcpy (condname + len, ".c", 2);
5985 base = condname;
5986 len += 2;
5987 return lookup_mnemonic (base, len);
5988 }
5989
5990 return NULL;
5991 }
5992
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   The default value comes from the opcode table; which field of *OPERAND
   it lands in depends on the operand class.  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element operands: the default is the register number of
       the register-plus-lane pair.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* An omitted scaled pattern defaults to "<pattern>, MUL #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate also cancels any relocation that
       parsing might have tentatively set up.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Barrier and hint operands default to a table entry rather than
       a bare number.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6091
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   Rejects relocation types that are invalid for MOVK or for 32-bit
   destination registers, and derives the implicit shift amount
   (0/16/32/48) that the chosen relocation group implies, storing it
   in operand 1's shifter.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not use the signed or group-relative forms listed
     below; filter them out first.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Map the relocation group (G0..G3) to the shift it implies.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* G2/G3 groups address bits above 31 and so cannot apply to a
	 32-bit register.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6193
/* A primitive log calculator.

   Return log2 of SIZE, which must be one of the supported operand
   sizes 1, 2, 4, 8 or 16 bytes.  Assert and return (unsigned) -1 for
   any other value.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* Index SIZE - 1 maps to log2 (SIZE); -1 (stored as 255) marks the
     sizes that are not powers of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 explicitly: the previous "size > 16" check alone
     let it through, and "ls[size - 1]" then indexed with (unsigned) -1
     because of unsigned wrap-around -- an out-of-bounds read.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
6209
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The real relocation depends on both the pseudo type chosen by the
   parser (plain, TLSLD DTPREL, TLSLE TPREL, each with or without
   overflow checking) and the transfer size implied by operand 0's
   qualifier.  Returns BFD_RELOC_AARCH64_NONE when the size is not
   representable for the chosen relocation family.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Row: pseudo reloc family (in the order the pseudo types are
     defined); column: log2 of the transfer size in bytes.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand has no explicit qualifier, deduce it from
     the qualifier of operand 0.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS families have no 128-bit variant.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6297
/* Check whether a register list REGINFO is valid.  The registers must be
   numbered in increasing order (modulo 32), in increments of one or two.

   If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
   increments of two.

   Return FALSE if such a register list is invalid, otherwise return TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t remaining, expected_regno, step;

  /* Bits [1:0] hold the register count minus one; each following
     5-bit field holds one register number, first register first.  */
  remaining = reginfo & 0x3;
  reginfo >>= 2;
  expected_regno = reginfo & 0x1f;
  step = accept_alternate ? 2 : 1;

  while (remaining-- != 0)
    {
      reginfo >>= 5;
      expected_regno = (expected_regno + step) & 0x1f;
      if ((reginfo & 0x1f) != expected_regno)
	return false;
    }

  return true;
}
6328
6329 /* Generic instruction operand parser. This does no encoding and no
6330 semantic validation; it merely squirrels values away in the inst
6331 structure. Returns TRUE or FALSE depending on whether the
6332 specified grammar matched. */
6333
6334 static bool
6335 parse_operands (char *str, const aarch64_opcode *opcode)
6336 {
6337 int i;
6338 char *backtrack_pos = 0;
6339 const enum aarch64_opnd *operands = opcode->operands;
6340 aarch64_reg_type imm_reg_type;
6341
6342 clear_error ();
6343 skip_whitespace (str);
6344
6345 if (AARCH64_CPU_HAS_ANY_FEATURES (*opcode->avariant,
6346 AARCH64_FEATURE_SVE
6347 | AARCH64_FEATURE_SVE2))
6348 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6349 else
6350 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6351
6352 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6353 {
6354 int64_t val;
6355 const reg_entry *reg;
6356 int comma_skipped_p = 0;
6357 struct vector_type_el vectype;
6358 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6359 aarch64_opnd_info *info = &inst.base.operands[i];
6360 aarch64_reg_type reg_type;
6361
6362 DEBUG_TRACE ("parse operand %d", i);
6363
6364 /* Assign the operand code. */
6365 info->type = operands[i];
6366
6367 if (optional_operand_p (opcode, i))
6368 {
6369 /* Remember where we are in case we need to backtrack. */
6370 gas_assert (!backtrack_pos);
6371 backtrack_pos = str;
6372 }
6373
6374 /* Expect comma between operands; the backtrack mechanism will take
6375 care of cases of omitted optional operand. */
6376 if (i > 0 && ! skip_past_char (&str, ','))
6377 {
6378 set_syntax_error (_("comma expected between operands"));
6379 goto failure;
6380 }
6381 else
6382 comma_skipped_p = 1;
6383
6384 switch (operands[i])
6385 {
6386 case AARCH64_OPND_Rd:
6387 case AARCH64_OPND_Rn:
6388 case AARCH64_OPND_Rm:
6389 case AARCH64_OPND_Rt:
6390 case AARCH64_OPND_Rt2:
6391 case AARCH64_OPND_Rs:
6392 case AARCH64_OPND_Ra:
6393 case AARCH64_OPND_Rt_LS64:
6394 case AARCH64_OPND_Rt_SYS:
6395 case AARCH64_OPND_PAIRREG:
6396 case AARCH64_OPND_SVE_Rm:
6397 po_int_fp_reg_or_fail (REG_TYPE_R_Z);
6398
6399 /* In LS64 load/store instructions Rt register number must be even
6400 and <=22. */
6401 if (operands[i] == AARCH64_OPND_Rt_LS64)
6402 {
6403 /* We've already checked if this is valid register.
6404 This will check if register number (Rt) is not undefined for LS64
6405 instructions:
6406 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6407 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6408 {
6409 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6410 goto failure;
6411 }
6412 }
6413 break;
6414
6415 case AARCH64_OPND_Rd_SP:
6416 case AARCH64_OPND_Rn_SP:
6417 case AARCH64_OPND_Rt_SP:
6418 case AARCH64_OPND_SVE_Rn_SP:
6419 case AARCH64_OPND_Rm_SP:
6420 po_int_fp_reg_or_fail (REG_TYPE_R_SP);
6421 break;
6422
6423 case AARCH64_OPND_Rm_EXT:
6424 case AARCH64_OPND_Rm_SFT:
6425 po_misc_or_fail (parse_shifter_operand
6426 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6427 ? SHIFTED_ARITH_IMM
6428 : SHIFTED_LOGIC_IMM)));
6429 if (!info->shifter.operator_present)
6430 {
6431 /* Default to LSL if not present. Libopcodes prefers shifter
6432 kind to be explicit. */
6433 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6434 info->shifter.kind = AARCH64_MOD_LSL;
6435 /* For Rm_EXT, libopcodes will carry out further check on whether
6436 or not stack pointer is used in the instruction (Recall that
6437 "the extend operator is not optional unless at least one of
6438 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6439 }
6440 break;
6441
6442 case AARCH64_OPND_Fd:
6443 case AARCH64_OPND_Fn:
6444 case AARCH64_OPND_Fm:
6445 case AARCH64_OPND_Fa:
6446 case AARCH64_OPND_Ft:
6447 case AARCH64_OPND_Ft2:
6448 case AARCH64_OPND_Sd:
6449 case AARCH64_OPND_Sn:
6450 case AARCH64_OPND_Sm:
6451 case AARCH64_OPND_SVE_VZn:
6452 case AARCH64_OPND_SVE_Vd:
6453 case AARCH64_OPND_SVE_Vm:
6454 case AARCH64_OPND_SVE_Vn:
6455 po_int_fp_reg_or_fail (REG_TYPE_BHSDQ);
6456 break;
6457
6458 case AARCH64_OPND_SVE_Pd:
6459 case AARCH64_OPND_SVE_Pg3:
6460 case AARCH64_OPND_SVE_Pg4_5:
6461 case AARCH64_OPND_SVE_Pg4_10:
6462 case AARCH64_OPND_SVE_Pg4_16:
6463 case AARCH64_OPND_SVE_Pm:
6464 case AARCH64_OPND_SVE_Pn:
6465 case AARCH64_OPND_SVE_Pt:
6466 case AARCH64_OPND_SME_Pm:
6467 reg_type = REG_TYPE_PN;
6468 goto vector_reg;
6469
6470 case AARCH64_OPND_SVE_Za_5:
6471 case AARCH64_OPND_SVE_Za_16:
6472 case AARCH64_OPND_SVE_Zd:
6473 case AARCH64_OPND_SVE_Zm_5:
6474 case AARCH64_OPND_SVE_Zm_16:
6475 case AARCH64_OPND_SVE_Zn:
6476 case AARCH64_OPND_SVE_Zt:
6477 reg_type = REG_TYPE_ZN;
6478 goto vector_reg;
6479
6480 case AARCH64_OPND_Va:
6481 case AARCH64_OPND_Vd:
6482 case AARCH64_OPND_Vn:
6483 case AARCH64_OPND_Vm:
6484 reg_type = REG_TYPE_VN;
6485 vector_reg:
6486 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6487 if (!reg)
6488 goto failure;
6489 if (vectype.defined & NTA_HASINDEX)
6490 goto failure;
6491
6492 info->reg.regno = reg->number;
6493 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6494 && vectype.type == NT_invtype)
6495 /* Unqualified Pn and Zn registers are allowed in certain
6496 contexts. Rely on F_STRICT qualifier checking to catch
6497 invalid uses. */
6498 info->qualifier = AARCH64_OPND_QLF_NIL;
6499 else
6500 {
6501 info->qualifier = vectype_to_qualifier (&vectype);
6502 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6503 goto failure;
6504 }
6505 break;
6506
6507 case AARCH64_OPND_VdD1:
6508 case AARCH64_OPND_VnD1:
6509 reg = aarch64_reg_parse (&str, REG_TYPE_VN, &vectype);
6510 if (!reg)
6511 goto failure;
6512 if (vectype.type != NT_d || vectype.index != 1)
6513 {
6514 set_fatal_syntax_error
6515 (_("the top half of a 128-bit FP/SIMD register is expected"));
6516 goto failure;
6517 }
6518 info->reg.regno = reg->number;
6519 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6520 here; it is correct for the purpose of encoding/decoding since
6521 only the register number is explicitly encoded in the related
6522 instructions, although this appears a bit hacky. */
6523 info->qualifier = AARCH64_OPND_QLF_S_D;
6524 break;
6525
6526 case AARCH64_OPND_SVE_Zm3_INDEX:
6527 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6528 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6529 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6530 case AARCH64_OPND_SVE_Zm4_INDEX:
6531 case AARCH64_OPND_SVE_Zn_INDEX:
6532 reg_type = REG_TYPE_ZN;
6533 goto vector_reg_index;
6534
6535 case AARCH64_OPND_Ed:
6536 case AARCH64_OPND_En:
6537 case AARCH64_OPND_Em:
6538 case AARCH64_OPND_Em16:
6539 case AARCH64_OPND_SM3_IMM2:
6540 reg_type = REG_TYPE_VN;
6541 vector_reg_index:
6542 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6543 if (!reg)
6544 goto failure;
6545 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6546 goto failure;
6547
6548 info->reglane.regno = reg->number;
6549 info->reglane.index = vectype.index;
6550 info->qualifier = vectype_to_qualifier (&vectype);
6551 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6552 goto failure;
6553 break;
6554
6555 case AARCH64_OPND_SVE_ZnxN:
6556 case AARCH64_OPND_SVE_ZtxN:
6557 reg_type = REG_TYPE_ZN;
6558 goto vector_reg_list;
6559
6560 case AARCH64_OPND_LVn:
6561 case AARCH64_OPND_LVt:
6562 case AARCH64_OPND_LVt_AL:
6563 case AARCH64_OPND_LEt:
6564 reg_type = REG_TYPE_VN;
6565 vector_reg_list:
6566 if (reg_type == REG_TYPE_ZN
6567 && get_opcode_dependent_value (opcode) == 1
6568 && *str != '{')
6569 {
6570 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6571 if (!reg)
6572 goto failure;
6573 info->reglist.first_regno = reg->number;
6574 info->reglist.num_regs = 1;
6575 }
6576 else
6577 {
6578 val = parse_vector_reg_list (&str, reg_type, &vectype);
6579 if (val == PARSE_FAIL)
6580 goto failure;
6581
6582 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6583 {
6584 set_fatal_syntax_error (_("invalid register list"));
6585 goto failure;
6586 }
6587
6588 if (vectype.width != 0 && *str != ',')
6589 {
6590 set_fatal_syntax_error
6591 (_("expected element type rather than vector type"));
6592 goto failure;
6593 }
6594
6595 info->reglist.first_regno = (val >> 2) & 0x1f;
6596 info->reglist.num_regs = (val & 0x3) + 1;
6597 }
6598 if (operands[i] == AARCH64_OPND_LEt)
6599 {
6600 if (!(vectype.defined & NTA_HASINDEX))
6601 goto failure;
6602 info->reglist.has_index = 1;
6603 info->reglist.index = vectype.index;
6604 }
6605 else
6606 {
6607 if (vectype.defined & NTA_HASINDEX)
6608 goto failure;
6609 if (!(vectype.defined & NTA_HASTYPE))
6610 {
6611 if (reg_type == REG_TYPE_ZN)
6612 set_fatal_syntax_error (_("missing type suffix"));
6613 goto failure;
6614 }
6615 }
6616 info->qualifier = vectype_to_qualifier (&vectype);
6617 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6618 goto failure;
6619 break;
6620
6621 case AARCH64_OPND_CRn:
6622 case AARCH64_OPND_CRm:
6623 {
6624 char prefix = *(str++);
6625 if (prefix != 'c' && prefix != 'C')
6626 goto failure;
6627
6628 po_imm_nc_or_fail ();
6629 if (val > 15)
6630 {
6631 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6632 goto failure;
6633 }
6634 info->qualifier = AARCH64_OPND_QLF_CR;
6635 info->imm.value = val;
6636 break;
6637 }
6638
6639 case AARCH64_OPND_SHLL_IMM:
6640 case AARCH64_OPND_IMM_VLSR:
6641 po_imm_or_fail (1, 64);
6642 info->imm.value = val;
6643 break;
6644
6645 case AARCH64_OPND_CCMP_IMM:
6646 case AARCH64_OPND_SIMM5:
6647 case AARCH64_OPND_FBITS:
6648 case AARCH64_OPND_TME_UIMM16:
6649 case AARCH64_OPND_UIMM4:
6650 case AARCH64_OPND_UIMM4_ADDG:
6651 case AARCH64_OPND_UIMM10:
6652 case AARCH64_OPND_UIMM3_OP1:
6653 case AARCH64_OPND_UIMM3_OP2:
6654 case AARCH64_OPND_IMM_VLSL:
6655 case AARCH64_OPND_IMM:
6656 case AARCH64_OPND_IMM_2:
6657 case AARCH64_OPND_WIDTH:
6658 case AARCH64_OPND_SVE_INV_LIMM:
6659 case AARCH64_OPND_SVE_LIMM:
6660 case AARCH64_OPND_SVE_LIMM_MOV:
6661 case AARCH64_OPND_SVE_SHLIMM_PRED:
6662 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6663 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6664 case AARCH64_OPND_SVE_SHRIMM_PRED:
6665 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6666 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6667 case AARCH64_OPND_SVE_SIMM5:
6668 case AARCH64_OPND_SVE_SIMM5B:
6669 case AARCH64_OPND_SVE_SIMM6:
6670 case AARCH64_OPND_SVE_SIMM8:
6671 case AARCH64_OPND_SVE_UIMM3:
6672 case AARCH64_OPND_SVE_UIMM7:
6673 case AARCH64_OPND_SVE_UIMM8:
6674 case AARCH64_OPND_SVE_UIMM8_53:
6675 case AARCH64_OPND_IMM_ROT1:
6676 case AARCH64_OPND_IMM_ROT2:
6677 case AARCH64_OPND_IMM_ROT3:
6678 case AARCH64_OPND_SVE_IMM_ROT1:
6679 case AARCH64_OPND_SVE_IMM_ROT2:
6680 case AARCH64_OPND_SVE_IMM_ROT3:
6681 case AARCH64_OPND_CSSC_SIMM8:
6682 case AARCH64_OPND_CSSC_UIMM8:
6683 po_imm_nc_or_fail ();
6684 info->imm.value = val;
6685 break;
6686
6687 case AARCH64_OPND_SVE_AIMM:
6688 case AARCH64_OPND_SVE_ASIMM:
6689 po_imm_nc_or_fail ();
6690 info->imm.value = val;
6691 skip_whitespace (str);
6692 if (skip_past_comma (&str))
6693 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6694 else
6695 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6696 break;
6697
6698 case AARCH64_OPND_SVE_PATTERN:
6699 po_enum_or_fail (aarch64_sve_pattern_array);
6700 info->imm.value = val;
6701 break;
6702
6703 case AARCH64_OPND_SVE_PATTERN_SCALED:
6704 po_enum_or_fail (aarch64_sve_pattern_array);
6705 info->imm.value = val;
6706 if (skip_past_comma (&str)
6707 && !parse_shift (&str, info, SHIFTED_MUL))
6708 goto failure;
6709 if (!info->shifter.operator_present)
6710 {
6711 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6712 info->shifter.kind = AARCH64_MOD_MUL;
6713 info->shifter.amount = 1;
6714 }
6715 break;
6716
6717 case AARCH64_OPND_SVE_PRFOP:
6718 po_enum_or_fail (aarch64_sve_prfop_array);
6719 info->imm.value = val;
6720 break;
6721
6722 case AARCH64_OPND_UIMM7:
6723 po_imm_or_fail (0, 127);
6724 info->imm.value = val;
6725 break;
6726
6727 case AARCH64_OPND_IDX:
6728 case AARCH64_OPND_MASK:
6729 case AARCH64_OPND_BIT_NUM:
6730 case AARCH64_OPND_IMMR:
6731 case AARCH64_OPND_IMMS:
6732 po_imm_or_fail (0, 63);
6733 info->imm.value = val;
6734 break;
6735
6736 case AARCH64_OPND_IMM0:
6737 po_imm_nc_or_fail ();
6738 if (val != 0)
6739 {
6740 set_fatal_syntax_error (_("immediate zero expected"));
6741 goto failure;
6742 }
6743 info->imm.value = 0;
6744 break;
6745
6746 case AARCH64_OPND_FPIMM0:
6747 {
6748 int qfloat;
6749 bool res1 = false, res2 = false;
6750 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6751 it is probably not worth the effort to support it. */
6752 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6753 imm_reg_type))
6754 && (error_p ()
6755 || !(res2 = parse_constant_immediate (&str, &val,
6756 imm_reg_type))))
6757 goto failure;
6758 if ((res1 && qfloat == 0) || (res2 && val == 0))
6759 {
6760 info->imm.value = 0;
6761 info->imm.is_fp = 1;
6762 break;
6763 }
6764 set_fatal_syntax_error (_("immediate zero expected"));
6765 goto failure;
6766 }
6767
6768 case AARCH64_OPND_IMM_MOV:
6769 {
6770 char *saved = str;
6771 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6772 reg_name_p (str, REG_TYPE_VN))
6773 goto failure;
6774 str = saved;
6775 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6776 GE_OPT_PREFIX, REJECT_ABSENT));
6777 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6778 later. fix_mov_imm_insn will try to determine a machine
6779 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6780 message if the immediate cannot be moved by a single
6781 instruction. */
6782 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6783 inst.base.operands[i].skip = 1;
6784 }
6785 break;
6786
6787 case AARCH64_OPND_SIMD_IMM:
6788 case AARCH64_OPND_SIMD_IMM_SFT:
6789 if (! parse_big_immediate (&str, &val, imm_reg_type))
6790 goto failure;
6791 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6792 /* addr_off_p */ 0,
6793 /* need_libopcodes_p */ 1,
6794 /* skip_p */ 1);
6795 /* Parse shift.
6796 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6797 shift, we don't check it here; we leave the checking to
6798 the libopcodes (operand_general_constraint_met_p). By
6799 doing this, we achieve better diagnostics. */
6800 if (skip_past_comma (&str)
6801 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6802 goto failure;
6803 if (!info->shifter.operator_present
6804 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6805 {
6806 /* Default to LSL if not present. Libopcodes prefers shifter
6807 kind to be explicit. */
6808 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6809 info->shifter.kind = AARCH64_MOD_LSL;
6810 }
6811 break;
6812
6813 case AARCH64_OPND_FPIMM:
6814 case AARCH64_OPND_SIMD_FPIMM:
6815 case AARCH64_OPND_SVE_FPIMM8:
6816 {
6817 int qfloat;
6818 bool dp_p;
6819
6820 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6821 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6822 || !aarch64_imm_float_p (qfloat))
6823 {
6824 if (!error_p ())
6825 set_fatal_syntax_error (_("invalid floating-point"
6826 " constant"));
6827 goto failure;
6828 }
6829 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6830 inst.base.operands[i].imm.is_fp = 1;
6831 }
6832 break;
6833
6834 case AARCH64_OPND_SVE_I1_HALF_ONE:
6835 case AARCH64_OPND_SVE_I1_HALF_TWO:
6836 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6837 {
6838 int qfloat;
6839 bool dp_p;
6840
6841 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6842 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6843 {
6844 if (!error_p ())
6845 set_fatal_syntax_error (_("invalid floating-point"
6846 " constant"));
6847 goto failure;
6848 }
6849 inst.base.operands[i].imm.value = qfloat;
6850 inst.base.operands[i].imm.is_fp = 1;
6851 }
6852 break;
6853
6854 case AARCH64_OPND_LIMM:
6855 po_misc_or_fail (parse_shifter_operand (&str, info,
6856 SHIFTED_LOGIC_IMM));
6857 if (info->shifter.operator_present)
6858 {
6859 set_fatal_syntax_error
6860 (_("shift not allowed for bitmask immediate"));
6861 goto failure;
6862 }
6863 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6864 /* addr_off_p */ 0,
6865 /* need_libopcodes_p */ 1,
6866 /* skip_p */ 1);
6867 break;
6868
6869 case AARCH64_OPND_AIMM:
6870 if (opcode->op == OP_ADD)
6871 /* ADD may have relocation types. */
6872 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6873 SHIFTED_ARITH_IMM));
6874 else
6875 po_misc_or_fail (parse_shifter_operand (&str, info,
6876 SHIFTED_ARITH_IMM));
6877 switch (inst.reloc.type)
6878 {
6879 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6880 info->shifter.amount = 12;
6881 break;
6882 case BFD_RELOC_UNUSED:
6883 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6884 if (info->shifter.kind != AARCH64_MOD_NONE)
6885 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6886 inst.reloc.pc_rel = 0;
6887 break;
6888 default:
6889 break;
6890 }
6891 info->imm.value = 0;
6892 if (!info->shifter.operator_present)
6893 {
6894 /* Default to LSL if not present. Libopcodes prefers shifter
6895 kind to be explicit. */
6896 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6897 info->shifter.kind = AARCH64_MOD_LSL;
6898 }
6899 break;
6900
6901 case AARCH64_OPND_HALF:
6902 {
6903 /* #<imm16> or relocation. */
6904 int internal_fixup_p;
6905 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6906 if (internal_fixup_p)
6907 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6908 skip_whitespace (str);
6909 if (skip_past_comma (&str))
6910 {
6911 /* {, LSL #<shift>} */
6912 if (! aarch64_gas_internal_fixup_p ())
6913 {
6914 set_fatal_syntax_error (_("can't mix relocation modifier "
6915 "with explicit shift"));
6916 goto failure;
6917 }
6918 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6919 }
6920 else
6921 inst.base.operands[i].shifter.amount = 0;
6922 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6923 inst.base.operands[i].imm.value = 0;
6924 if (! process_movw_reloc_info ())
6925 goto failure;
6926 }
6927 break;
6928
6929 case AARCH64_OPND_EXCEPTION:
6930 case AARCH64_OPND_UNDEFINED:
6931 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6932 imm_reg_type));
6933 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6934 /* addr_off_p */ 0,
6935 /* need_libopcodes_p */ 0,
6936 /* skip_p */ 1);
6937 break;
6938
6939 case AARCH64_OPND_NZCV:
6940 {
6941 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6942 if (nzcv != NULL)
6943 {
6944 str += 4;
6945 info->imm.value = nzcv->value;
6946 break;
6947 }
6948 po_imm_or_fail (0, 15);
6949 info->imm.value = val;
6950 }
6951 break;
6952
6953 case AARCH64_OPND_COND:
6954 case AARCH64_OPND_COND1:
6955 {
6956 char *start = str;
6957 do
6958 str++;
6959 while (ISALPHA (*str));
6960 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6961 if (info->cond == NULL)
6962 {
6963 set_syntax_error (_("invalid condition"));
6964 goto failure;
6965 }
6966 else if (operands[i] == AARCH64_OPND_COND1
6967 && (info->cond->value & 0xe) == 0xe)
6968 {
6969 /* Do not allow AL or NV. */
6970 set_default_error ();
6971 goto failure;
6972 }
6973 }
6974 break;
6975
6976 case AARCH64_OPND_ADDR_ADRP:
6977 po_misc_or_fail (parse_adrp (&str));
6978 /* Clear the value as operand needs to be relocated. */
6979 info->imm.value = 0;
6980 break;
6981
6982 case AARCH64_OPND_ADDR_PCREL14:
6983 case AARCH64_OPND_ADDR_PCREL19:
6984 case AARCH64_OPND_ADDR_PCREL21:
6985 case AARCH64_OPND_ADDR_PCREL26:
6986 po_misc_or_fail (parse_address (&str, info));
6987 if (!info->addr.pcrel)
6988 {
6989 set_syntax_error (_("invalid pc-relative address"));
6990 goto failure;
6991 }
6992 if (inst.gen_lit_pool
6993 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6994 {
6995 /* Only permit "=value" in the literal load instructions.
6996 The literal will be generated by programmer_friendly_fixup. */
6997 set_syntax_error (_("invalid use of \"=immediate\""));
6998 goto failure;
6999 }
7000 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
7001 {
7002 set_syntax_error (_("unrecognized relocation suffix"));
7003 goto failure;
7004 }
7005 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
7006 {
7007 info->imm.value = inst.reloc.exp.X_add_number;
7008 inst.reloc.type = BFD_RELOC_UNUSED;
7009 }
7010 else
7011 {
7012 info->imm.value = 0;
7013 if (inst.reloc.type == BFD_RELOC_UNUSED)
7014 switch (opcode->iclass)
7015 {
7016 case compbranch:
7017 case condbranch:
7018 /* e.g. CBZ or B.COND */
7019 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7020 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
7021 break;
7022 case testbranch:
7023 /* e.g. TBZ */
7024 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
7025 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
7026 break;
7027 case branch_imm:
7028 /* e.g. B or BL */
7029 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
7030 inst.reloc.type =
7031 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
7032 : BFD_RELOC_AARCH64_JUMP26;
7033 break;
7034 case loadlit:
7035 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7036 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
7037 break;
7038 case pcreladdr:
7039 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
7040 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
7041 break;
7042 default:
7043 gas_assert (0);
7044 abort ();
7045 }
7046 inst.reloc.pc_rel = 1;
7047 }
7048 break;
7049
7050 case AARCH64_OPND_ADDR_SIMPLE:
7051 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7052 {
7053 /* [<Xn|SP>{, #<simm>}] */
7054 char *start = str;
7055 /* First use the normal address-parsing routines, to get
7056 the usual syntax errors. */
7057 po_misc_or_fail (parse_address (&str, info));
7058 if (info->addr.pcrel || info->addr.offset.is_reg
7059 || !info->addr.preind || info->addr.postind
7060 || info->addr.writeback)
7061 {
7062 set_syntax_error (_("invalid addressing mode"));
7063 goto failure;
7064 }
7065
7066 /* Then retry, matching the specific syntax of these addresses. */
7067 str = start;
7068 po_char_or_fail ('[');
7069 po_reg_or_fail (REG_TYPE_R64_SP);
7070 /* Accept optional ", #0". */
7071 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7072 && skip_past_char (&str, ','))
7073 {
7074 skip_past_char (&str, '#');
7075 if (! skip_past_char (&str, '0'))
7076 {
7077 set_fatal_syntax_error
7078 (_("the optional immediate offset can only be 0"));
7079 goto failure;
7080 }
7081 }
7082 po_char_or_fail (']');
7083 break;
7084 }
7085
7086 case AARCH64_OPND_ADDR_REGOFF:
7087 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7088 po_misc_or_fail (parse_address (&str, info));
7089 regoff_addr:
7090 if (info->addr.pcrel || !info->addr.offset.is_reg
7091 || !info->addr.preind || info->addr.postind
7092 || info->addr.writeback)
7093 {
7094 set_syntax_error (_("invalid addressing mode"));
7095 goto failure;
7096 }
7097 if (!info->shifter.operator_present)
7098 {
7099 /* Default to LSL if not present. Libopcodes prefers shifter
7100 kind to be explicit. */
7101 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7102 info->shifter.kind = AARCH64_MOD_LSL;
7103 }
7104 /* Qualifier to be deduced by libopcodes. */
7105 break;
7106
7107 case AARCH64_OPND_ADDR_SIMM7:
7108 po_misc_or_fail (parse_address (&str, info));
7109 if (info->addr.pcrel || info->addr.offset.is_reg
7110 || (!info->addr.preind && !info->addr.postind))
7111 {
7112 set_syntax_error (_("invalid addressing mode"));
7113 goto failure;
7114 }
7115 if (inst.reloc.type != BFD_RELOC_UNUSED)
7116 {
7117 set_syntax_error (_("relocation not allowed"));
7118 goto failure;
7119 }
7120 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7121 /* addr_off_p */ 1,
7122 /* need_libopcodes_p */ 1,
7123 /* skip_p */ 0);
7124 break;
7125
7126 case AARCH64_OPND_ADDR_SIMM9:
7127 case AARCH64_OPND_ADDR_SIMM9_2:
7128 case AARCH64_OPND_ADDR_SIMM11:
7129 case AARCH64_OPND_ADDR_SIMM13:
7130 po_misc_or_fail (parse_address (&str, info));
7131 if (info->addr.pcrel || info->addr.offset.is_reg
7132 || (!info->addr.preind && !info->addr.postind)
7133 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7134 && info->addr.writeback))
7135 {
7136 set_syntax_error (_("invalid addressing mode"));
7137 goto failure;
7138 }
7139 if (inst.reloc.type != BFD_RELOC_UNUSED)
7140 {
7141 set_syntax_error (_("relocation not allowed"));
7142 goto failure;
7143 }
7144 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7145 /* addr_off_p */ 1,
7146 /* need_libopcodes_p */ 1,
7147 /* skip_p */ 0);
7148 break;
7149
7150 case AARCH64_OPND_ADDR_SIMM10:
7151 case AARCH64_OPND_ADDR_OFFSET:
7152 po_misc_or_fail (parse_address (&str, info));
7153 if (info->addr.pcrel || info->addr.offset.is_reg
7154 || !info->addr.preind || info->addr.postind)
7155 {
7156 set_syntax_error (_("invalid addressing mode"));
7157 goto failure;
7158 }
7159 if (inst.reloc.type != BFD_RELOC_UNUSED)
7160 {
7161 set_syntax_error (_("relocation not allowed"));
7162 goto failure;
7163 }
7164 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7165 /* addr_off_p */ 1,
7166 /* need_libopcodes_p */ 1,
7167 /* skip_p */ 0);
7168 break;
7169
7170 case AARCH64_OPND_ADDR_UIMM12:
7171 po_misc_or_fail (parse_address (&str, info));
7172 if (info->addr.pcrel || info->addr.offset.is_reg
7173 || !info->addr.preind || info->addr.writeback)
7174 {
7175 set_syntax_error (_("invalid addressing mode"));
7176 goto failure;
7177 }
7178 if (inst.reloc.type == BFD_RELOC_UNUSED)
7179 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7180 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7181 || (inst.reloc.type
7182 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7183 || (inst.reloc.type
7184 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7185 || (inst.reloc.type
7186 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7187 || (inst.reloc.type
7188 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7189 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7190 /* Leave qualifier to be determined by libopcodes. */
7191 break;
7192
7193 case AARCH64_OPND_SIMD_ADDR_POST:
7194 /* [<Xn|SP>], <Xm|#<amount>> */
7195 po_misc_or_fail (parse_address (&str, info));
7196 if (!info->addr.postind || !info->addr.writeback)
7197 {
7198 set_syntax_error (_("invalid addressing mode"));
7199 goto failure;
7200 }
7201 if (!info->addr.offset.is_reg)
7202 {
7203 if (inst.reloc.exp.X_op == O_constant)
7204 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7205 else
7206 {
7207 set_fatal_syntax_error
7208 (_("writeback value must be an immediate constant"));
7209 goto failure;
7210 }
7211 }
7212 /* No qualifier. */
7213 break;
7214
7215 case AARCH64_OPND_SME_SM_ZA:
7216 /* { SM | ZA } */
7217 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7218 {
7219 set_syntax_error (_("unknown or missing PSTATE field name"));
7220 goto failure;
7221 }
7222 info->reg.regno = val;
7223 break;
7224
7225 case AARCH64_OPND_SME_PnT_Wm_imm:
7226 if (!parse_dual_indexed_reg (&str, REG_TYPE_PN,
7227 &info->indexed_za, &qualifier, 0))
7228 goto failure;
7229 info->qualifier = qualifier;
7230 break;
7231
7232 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7233 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7234 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7235 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7236 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7237 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7238 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7239 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7240 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7241 case AARCH64_OPND_SVE_ADDR_RI_U6:
7242 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7243 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7244 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7245 /* [X<n>{, #imm, MUL VL}]
7246 [X<n>{, #imm}]
7247 but recognizing SVE registers. */
7248 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7249 &offset_qualifier));
7250 if (base_qualifier != AARCH64_OPND_QLF_X)
7251 {
7252 set_syntax_error (_("invalid addressing mode"));
7253 goto failure;
7254 }
7255 sve_regimm:
7256 if (info->addr.pcrel || info->addr.offset.is_reg
7257 || !info->addr.preind || info->addr.writeback)
7258 {
7259 set_syntax_error (_("invalid addressing mode"));
7260 goto failure;
7261 }
7262 if (inst.reloc.type != BFD_RELOC_UNUSED
7263 || inst.reloc.exp.X_op != O_constant)
7264 {
7265 /* Make sure this has priority over
7266 "invalid addressing mode". */
7267 set_fatal_syntax_error (_("constant offset required"));
7268 goto failure;
7269 }
7270 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7271 break;
7272
7273 case AARCH64_OPND_SVE_ADDR_R:
7274 /* [<Xn|SP>{, <R><m>}]
7275 but recognizing SVE registers. */
7276 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7277 &offset_qualifier));
7278 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7279 {
7280 offset_qualifier = AARCH64_OPND_QLF_X;
7281 info->addr.offset.is_reg = 1;
7282 info->addr.offset.regno = 31;
7283 }
7284 else if (base_qualifier != AARCH64_OPND_QLF_X
7285 || offset_qualifier != AARCH64_OPND_QLF_X)
7286 {
7287 set_syntax_error (_("invalid addressing mode"));
7288 goto failure;
7289 }
7290 goto regoff_addr;
7291
7292 case AARCH64_OPND_SVE_ADDR_RR:
7293 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7294 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7295 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7296 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7297 case AARCH64_OPND_SVE_ADDR_RX:
7298 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7299 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7300 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7301 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7302 but recognizing SVE registers. */
7303 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7304 &offset_qualifier));
7305 if (base_qualifier != AARCH64_OPND_QLF_X
7306 || offset_qualifier != AARCH64_OPND_QLF_X)
7307 {
7308 set_syntax_error (_("invalid addressing mode"));
7309 goto failure;
7310 }
7311 goto regoff_addr;
7312
7313 case AARCH64_OPND_SVE_ADDR_RZ:
7314 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7315 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7316 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7317 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7318 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7319 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7320 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7321 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7322 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7323 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7324 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7325 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7326 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7327 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7328 &offset_qualifier));
7329 if (base_qualifier != AARCH64_OPND_QLF_X
7330 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7331 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7332 {
7333 set_syntax_error (_("invalid addressing mode"));
7334 goto failure;
7335 }
7336 info->qualifier = offset_qualifier;
7337 goto regoff_addr;
7338
7339 case AARCH64_OPND_SVE_ADDR_ZX:
7340 /* [Zn.<T>{, <Xm>}]. */
7341 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7342 &offset_qualifier));
7343 /* Things to check:
7344 base_qualifier either S_S or S_D
7345 offset_qualifier must be X
7346 */
7347 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7348 && base_qualifier != AARCH64_OPND_QLF_S_D)
7349 || offset_qualifier != AARCH64_OPND_QLF_X)
7350 {
7351 set_syntax_error (_("invalid addressing mode"));
7352 goto failure;
7353 }
7354 info->qualifier = base_qualifier;
7355 if (!info->addr.offset.is_reg || info->addr.pcrel
7356 || !info->addr.preind || info->addr.writeback
7357 || info->shifter.operator_present != 0)
7358 {
7359 set_syntax_error (_("invalid addressing mode"));
7360 goto failure;
7361 }
7362 info->shifter.kind = AARCH64_MOD_LSL;
7363 break;
7364
7365
7366 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7367 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7368 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7369 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7370 /* [Z<n>.<T>{, #imm}] */
7371 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7372 &offset_qualifier));
7373 if (base_qualifier != AARCH64_OPND_QLF_S_S
7374 && base_qualifier != AARCH64_OPND_QLF_S_D)
7375 {
7376 set_syntax_error (_("invalid addressing mode"));
7377 goto failure;
7378 }
7379 info->qualifier = base_qualifier;
7380 goto sve_regimm;
7381
7382 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7383 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7384 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7385 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7386 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7387
7388 We don't reject:
7389
7390 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7391
7392 here since we get better error messages by leaving it to
7393 the qualifier checking routines. */
7394 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7395 &offset_qualifier));
7396 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7397 && base_qualifier != AARCH64_OPND_QLF_S_D)
7398 || offset_qualifier != base_qualifier)
7399 {
7400 set_syntax_error (_("invalid addressing mode"));
7401 goto failure;
7402 }
7403 info->qualifier = base_qualifier;
7404 goto regoff_addr;
7405
7406 case AARCH64_OPND_SYSREG:
7407 {
7408 uint32_t sysreg_flags;
7409 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7410 &sysreg_flags)) == PARSE_FAIL)
7411 {
7412 set_syntax_error (_("unknown or missing system register name"));
7413 goto failure;
7414 }
7415 inst.base.operands[i].sysreg.value = val;
7416 inst.base.operands[i].sysreg.flags = sysreg_flags;
7417 break;
7418 }
7419
7420 case AARCH64_OPND_PSTATEFIELD:
7421 {
7422 uint32_t sysreg_flags;
7423 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7424 &sysreg_flags)) == PARSE_FAIL)
7425 {
7426 set_syntax_error (_("unknown or missing PSTATE field name"));
7427 goto failure;
7428 }
7429 inst.base.operands[i].pstatefield = val;
7430 inst.base.operands[i].sysreg.flags = sysreg_flags;
7431 break;
7432 }
7433
7434 case AARCH64_OPND_SYSREG_IC:
7435 inst.base.operands[i].sysins_op =
7436 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7437 goto sys_reg_ins;
7438
7439 case AARCH64_OPND_SYSREG_DC:
7440 inst.base.operands[i].sysins_op =
7441 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7442 goto sys_reg_ins;
7443
7444 case AARCH64_OPND_SYSREG_AT:
7445 inst.base.operands[i].sysins_op =
7446 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7447 goto sys_reg_ins;
7448
7449 case AARCH64_OPND_SYSREG_SR:
7450 inst.base.operands[i].sysins_op =
7451 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7452 goto sys_reg_ins;
7453
7454 case AARCH64_OPND_SYSREG_TLBI:
7455 inst.base.operands[i].sysins_op =
7456 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7457 sys_reg_ins:
7458 if (inst.base.operands[i].sysins_op == NULL)
7459 {
7460 set_fatal_syntax_error ( _("unknown or missing operation name"));
7461 goto failure;
7462 }
7463 break;
7464
7465 case AARCH64_OPND_BARRIER:
7466 case AARCH64_OPND_BARRIER_ISB:
7467 val = parse_barrier (&str);
7468 if (val != PARSE_FAIL
7469 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7470 {
7471 /* ISB only accepts options name 'sy'. */
7472 set_syntax_error
7473 (_("the specified option is not accepted in ISB"));
7474 /* Turn off backtrack as this optional operand is present. */
7475 backtrack_pos = 0;
7476 goto failure;
7477 }
7478 if (val != PARSE_FAIL
7479 && operands[i] == AARCH64_OPND_BARRIER)
7480 {
7481 /* Regular barriers accept options CRm (C0-C15).
7482 DSB nXS barrier variant accepts values > 15. */
7483 if (val < 0 || val > 15)
7484 {
7485 set_syntax_error (_("the specified option is not accepted in DSB"));
7486 goto failure;
7487 }
7488 }
7489 /* This is an extension to accept a 0..15 immediate. */
7490 if (val == PARSE_FAIL)
7491 po_imm_or_fail (0, 15);
7492 info->barrier = aarch64_barrier_options + val;
7493 break;
7494
7495 case AARCH64_OPND_BARRIER_DSB_NXS:
7496 val = parse_barrier (&str);
7497 if (val != PARSE_FAIL)
7498 {
7499 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7500 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7501 {
7502 set_syntax_error (_("the specified option is not accepted in DSB"));
7503 /* Turn off backtrack as this optional operand is present. */
7504 backtrack_pos = 0;
7505 goto failure;
7506 }
7507 }
7508 else
7509 {
7510 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7511 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7512 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7513 goto failure;
7514 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7515 {
7516 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7517 goto failure;
7518 }
7519 }
7520 /* Option index is encoded as 2-bit value in val<3:2>. */
7521 val = (val >> 2) - 4;
7522 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7523 break;
7524
7525 case AARCH64_OPND_PRFOP:
7526 val = parse_pldop (&str);
7527 /* This is an extension to accept a 0..31 immediate. */
7528 if (val == PARSE_FAIL)
7529 po_imm_or_fail (0, 31);
7530 inst.base.operands[i].prfop = aarch64_prfops + val;
7531 break;
7532
7533 case AARCH64_OPND_BARRIER_PSB:
7534 val = parse_barrier_psb (&str, &(info->hint_option));
7535 if (val == PARSE_FAIL)
7536 goto failure;
7537 break;
7538
7539 case AARCH64_OPND_BTI_TARGET:
7540 val = parse_bti_operand (&str, &(info->hint_option));
7541 if (val == PARSE_FAIL)
7542 goto failure;
7543 break;
7544
7545 case AARCH64_OPND_SME_ZAda_2b:
7546 case AARCH64_OPND_SME_ZAda_3b:
7547 reg = parse_reg_with_qual (&str, REG_TYPE_ZAT, &qualifier, 0);
7548 if (!reg)
7549 goto failure;
7550 info->reg.regno = reg->number;
7551 info->qualifier = qualifier;
7552 break;
7553
7554 case AARCH64_OPND_SME_ZA_HV_idx_src:
7555 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7556 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7557 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr
7558 ? !parse_sme_za_hv_tiles_operand_with_braces (&str,
7559 &info->indexed_za,
7560 &qualifier)
7561 : !parse_dual_indexed_reg (&str, REG_TYPE_ZATHV,
7562 &info->indexed_za, &qualifier, 0))
7563 goto failure;
7564 info->qualifier = qualifier;
7565 break;
7566
7567 case AARCH64_OPND_SME_list_of_64bit_tiles:
7568 val = parse_sme_list_of_64bit_tiles (&str);
7569 if (val == PARSE_FAIL)
7570 goto failure;
7571 info->imm.value = val;
7572 break;
7573
7574 case AARCH64_OPND_SME_ZA_array:
7575 if (!parse_dual_indexed_reg (&str, REG_TYPE_ZA,
7576 &info->indexed_za, &qualifier, 0))
7577 goto failure;
7578 info->qualifier = qualifier;
7579 break;
7580
7581 case AARCH64_OPND_MOPS_ADDR_Rd:
7582 case AARCH64_OPND_MOPS_ADDR_Rs:
7583 po_char_or_fail ('[');
7584 if (!parse_x0_to_x30 (&str, info))
7585 goto failure;
7586 po_char_or_fail (']');
7587 po_char_or_fail ('!');
7588 break;
7589
7590 case AARCH64_OPND_MOPS_WB_Rn:
7591 if (!parse_x0_to_x30 (&str, info))
7592 goto failure;
7593 po_char_or_fail ('!');
7594 break;
7595
7596 default:
7597 as_fatal (_("unhandled operand code %d"), operands[i]);
7598 }
7599
7600 /* If we get here, this operand was successfully parsed. */
7601 inst.base.operands[i].present = 1;
7602 continue;
7603
7604 failure:
7605 /* The parse routine should already have set the error, but in case
7606 not, set a default one here. */
7607 if (! error_p ())
7608 set_default_error ();
7609
7610 if (! backtrack_pos)
7611 goto parse_operands_return;
7612
7613 {
7614 /* We reach here because this operand is marked as optional, and
7615 either no operand was supplied or the operand was supplied but it
7616 was syntactically incorrect. In the latter case we report an
7617 error. In the former case we perform a few more checks before
7618 dropping through to the code to insert the default operand. */
7619
7620 char *tmp = backtrack_pos;
7621 char endchar = END_OF_INSN;
7622
7623 if (i != (aarch64_num_of_operands (opcode) - 1))
7624 endchar = ',';
7625 skip_past_char (&tmp, ',');
7626
7627 if (*tmp != endchar)
7628 /* The user has supplied an operand in the wrong format. */
7629 goto parse_operands_return;
7630
7631 /* Make sure there is not a comma before the optional operand.
7632 For example the fifth operand of 'sys' is optional:
7633
7634 sys #0,c0,c0,#0, <--- wrong
7635 sys #0,c0,c0,#0 <--- correct. */
7636 if (comma_skipped_p && i && endchar == END_OF_INSN)
7637 {
7638 set_fatal_syntax_error
7639 (_("unexpected comma before the omitted optional operand"));
7640 goto parse_operands_return;
7641 }
7642 }
7643
7644 /* Reaching here means we are dealing with an optional operand that is
7645 omitted from the assembly line. */
7646 gas_assert (optional_operand_p (opcode, i));
7647 info->present = 0;
7648 process_omitted_operand (operands[i], opcode, i, info);
7649
7650 /* Try again, skipping the optional operand at backtrack_pos. */
7651 str = backtrack_pos;
7652 backtrack_pos = 0;
7653
7654 /* Clear any error record after the omitted optional operand has been
7655 successfully handled. */
7656 clear_error ();
7657 }
7658
7659 /* Check if we have parsed all the operands. */
7660 if (*str != '\0' && ! error_p ())
7661 {
7662 /* Set I to the index of the last present operand; this is
7663 for the purpose of diagnostics. */
7664 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7665 ;
7666 set_fatal_syntax_error
7667 (_("unexpected characters following instruction"));
7668 }
7669
7670 parse_operands_return:
7671
7672 if (error_p ())
7673 {
7674 inst.parsing_error.index = i;
7675 DEBUG_TRACE ("parsing FAIL: %s - %s",
7676 operand_mismatch_kind_names[inst.parsing_error.kind],
7677 inst.parsing_error.error);
7678 /* Record the operand error properly; this is useful when there
7679 are multiple instruction templates for a mnemonic name, so that
7680 later on, we can select the error that most closely describes
7681 the problem. */
7682 record_operand_error_info (opcode, &inst.parsing_error);
7683 return false;
7684 }
7685 else
7686 {
7687 DEBUG_TRACE ("parsing SUCCESS");
7688 return true;
7689 }
7690 }
7691
/* It does some fix-up to provide some programmer friendly feature while
   keeping the libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.

   On failure an operand error has already been recorded via
   record_operand_error / record_operand_out_of_range_error.  */

static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* With a W register the bit number must fit in 0..31.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  /* Accepted; present the operand to libopcodes in the X form.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW always loads a 32-bit literal regardless of the
	     destination qualifier.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
7799
/* Check for loads and stores that will cause unpredictable behavior.
   Emits as_warn diagnostics only; never rejects the instruction.
   STR is the original source line, used verbatim in the messages.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 NOTE(review): bit 22 of the encoding is used here to distinguish
	 the load form from the store form.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.
		 Bit 21 selects the pair variant.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7895
7896 static void
7897 force_automatic_sequence_close (void)
7898 {
7899 struct aarch64_segment_info_type *tc_seg_info;
7900
7901 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7902 if (tc_seg_info->insn_sequence.instr)
7903 {
7904 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7905 _("previous `%s' sequence has not been closed"),
7906 tc_seg_info->insn_sequence.instr->opcode->name);
7907 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7908 }
7909 }
7910
7911 /* A wrapper function to interface with libopcodes on encoding and
7912 record the error message if there is any.
7913
7914 Return TRUE on success; otherwise return FALSE. */
7915
7916 static bool
7917 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7918 aarch64_insn *code)
7919 {
7920 aarch64_operand_error error_info;
7921 memset (&error_info, '\0', sizeof (error_info));
7922 error_info.kind = AARCH64_OPDE_NIL;
7923 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7924 && !error_info.non_fatal)
7925 return true;
7926
7927 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7928 record_operand_error_info (opcode, &error_info);
7929 return error_info.non_fatal;
7930 }
7931
#ifdef DEBUG_AARCH64
/* Debug helper: print the operand list of OPCODE, preferring the operand's
   name and falling back to its description when the name is empty.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  for (int i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *name = aarch64_get_operand_name (opcode->operands[i]);

      if (name[0] == '\0')
	name = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, name);
    }
}
#endif /* DEBUG_AARCH64 */
7947
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  DOT remembers the first '.' so that
     condition suffixes (e.g. "b.eq") can be split off.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* A dot-less name may actually be a new register alias definition;
     if so there is no instruction to assemble.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the parsed condition across the per-template reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template did not match; restore a clean state before
	 trying the next candidate with the same mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8103
/* Various frobbings of labels and their addresses.  */

void
aarch64_start_line_hook (void)
{
  /* Forget the label seen on the previous line; md_assemble only aligns
     a label that appears on the same line as the instruction.  */
  last_label_seen = NULL;
}
8111
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember the most recent label so md_assemble can re-anchor it to the
     (possibly re-aligned) start of the next instruction.  */
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8119
/* Hook run when a section is left/changed.  */

void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
8126
8127 int
8128 aarch64_data_in_code (void)
8129 {
8130 if (startswith (input_line_pointer + 1, "data:"))
8131 {
8132 *input_line_pointer = '/';
8133 input_line_pointer += 5;
8134 *input_line_pointer = 0;
8135 return 1;
8136 }
8137
8138 return 0;
8139 }
8140
/* Canonicalize NAME by stripping a trailing "/data" marker in place.
   Returns NAME (possibly truncated).  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  /* Only strip when something precedes the marker.  */
  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8151 \f
8152 /* Table of all register names defined by default. The user can
8153 define additional names with .req. Note that all register names
8154 should appear in both upper and lowercase variants. Some registers
8155 also have mixed-case names. */
8156
8157 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8158 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8159 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8160 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8161 #define REGSET16(p,t) \
8162 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8163 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8164 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8165 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8166 #define REGSET16S(p,s,t) \
8167 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8168 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8169 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8170 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8171 #define REGSET31(p,t) \
8172 REGSET16(p, t), \
8173 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8174 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8175 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8176 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8177 #define REGSET(p,t) \
8178 REGSET31(p,t), REGNUM(p,31,t)
8179
8180 /* These go into aarch64_reg_hsh hash-table. */
8181 static const reg_entry reg_names[] = {
8182 /* Integer registers. */
8183 REGSET31 (x, R_64), REGSET31 (X, R_64),
8184 REGSET31 (w, R_32), REGSET31 (W, R_32),
8185
8186 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
8187 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
8188 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
8189 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
8190 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
8191 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
8192
8193 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
8194 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
8195
8196 /* Floating-point single precision registers. */
8197 REGSET (s, FP_S), REGSET (S, FP_S),
8198
8199 /* Floating-point double precision registers. */
8200 REGSET (d, FP_D), REGSET (D, FP_D),
8201
8202 /* Floating-point half precision registers. */
8203 REGSET (h, FP_H), REGSET (H, FP_H),
8204
8205 /* Floating-point byte precision registers. */
8206 REGSET (b, FP_B), REGSET (B, FP_B),
8207
8208 /* Floating-point quad precision registers. */
8209 REGSET (q, FP_Q), REGSET (Q, FP_Q),
8210
8211 /* FP/SIMD registers. */
8212 REGSET (v, VN), REGSET (V, VN),
8213
8214 /* SVE vector registers. */
8215 REGSET (z, ZN), REGSET (Z, ZN),
8216
8217 /* SVE predicate registers. */
8218 REGSET16 (p, PN), REGSET16 (P, PN),
8219
8220 /* SME ZA. We model this as a register because it acts syntactically
8221 like ZA0H, supporting qualifier suffixes and indexing. */
8222 REGDEF (za, 0, ZA), REGDEF (ZA, 0, ZA),
8223
8224 /* SME ZA tile registers. */
8225 REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),
8226
8227 /* SME ZA tile registers (horizontal slice). */
8228 REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),
8229
8230 /* SME ZA tile registers (vertical slice). */
8231 REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV)
8232 };
8233
8234 #undef REGDEF
8235 #undef REGDEF_ALIAS
8236 #undef REGNUM
8237 #undef REGSET16
8238 #undef REGSET31
8239 #undef REGSET
8240
/* Single-letter helpers for the four condition flags: an uppercase macro
   means the flag bit is 1, lowercase means 0.  B packs them into the 4-bit
   NZCV immediate with N in bit 3 down to V in bit 0.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All sixteen case-combinations of the "nzcv" operand spelling; the
   capitalised letters in each name select the corresponding flag bits.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8278 \f
/* MD interface: bits in the object file.  */

/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
   for use in the a.out file, and stores them in the array pointed to by buf.
   This knows about the endian-ness of the target machine and does
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte)
   2 (short) and 4 (long)  Floating numbers are put out as a series of
   LITTLENUMS (shorts, here at least).  */

void
md_number_to_chars (char *buf, valueT val, int n)
{
  /* AArch64 is bi-endian; honour the selected target endianness.  */
  if (target_big_endian)
    number_to_chars_bigendian (buf, val, n);
  else
    number_to_chars_littleendian (buf, val, n);
}
8296
/* MD interface: Sections.  */

/* Estimate the size of a frag before relaxing.  Assume everything fits in
   4 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Every AArch64 instruction is exactly one 32-bit word.  */
  fragp->fr_var = 4;
  return 4;
}
8308
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No additional padding is applied here; sections keep their size.  */
  return size;
}
8316
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total number of padding bytes between the end of this frag's fixed
     part and the start of the next frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach 4-byte alignment; they cannot hold an
     instruction, so zero-fill them (marked as data where supported).  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating part is one NOP.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8374
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets.  */
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state (code vs data) implied by the frag type.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
8420
8421 /* Whether SFrame stack trace info is supported. */
8422
8423 bool
8424 aarch64_support_sframe_p (void)
8425 {
8426 /* At this time, SFrame is supported for aarch64 only. */
8427 return (aarch64_abi == AARCH64_ABI_LP64);
8428 }
8429
/* Specify if RA tracking is needed.  */

bool
aarch64_sframe_ra_tracking_p (void)
{
  /* The return address is always tracked on aarch64.  */
  return true;
}
8437
/* Specify the fixed offset to recover RA from CFA.
   (useful only when RA tracking is not needed).  */

offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  /* Not applicable here: aarch64_sframe_ra_tracking_p returns true.  */
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8446
8447 /* Get the abi/arch indentifier for SFrame. */
8448
8449 unsigned char
8450 aarch64_sframe_get_abi_arch (void)
8451 {
8452 unsigned char sframe_abi_arch = 0;
8453
8454 if (aarch64_support_sframe_p ())
8455 {
8456 sframe_abi_arch = target_big_endian
8457 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8458 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8459 }
8460
8461 return sframe_abi_arch;
8462 }
8463
8464 #endif /* OBJ_ELF */
8465 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* Initial unwind rule: CFA = SP + 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8473
8474 /* Convert REGNAME to a DWARF-2 register number. */
8475
8476 int
8477 tc_aarch64_regname_to_dw2regnum (char *regname)
8478 {
8479 const reg_entry *reg = parse_reg (&regname);
8480 if (reg == NULL)
8481 return -1;
8482
8483 switch (reg->type)
8484 {
8485 case REG_TYPE_SP_32:
8486 case REG_TYPE_SP_64:
8487 case REG_TYPE_R_32:
8488 case REG_TYPE_R_64:
8489 return reg->number;
8490
8491 case REG_TYPE_FP_B:
8492 case REG_TYPE_FP_H:
8493 case REG_TYPE_FP_S:
8494 case REG_TYPE_FP_D:
8495 case REG_TYPE_FP_Q:
8496 return reg->number + 64;
8497
8498 default:
8499 break;
8500 }
8501 return -1;
8502 }
8503
8504 /* Implement DWARF2_ADDR_SIZE. */
8505
8506 int
8507 aarch64_dwarf2_addr_size (void)
8508 {
8509 if (ilp32_p)
8510 return 4;
8511 else if (llp64_p)
8512 return 8;
8513 return bfd_arch_bits_per_address (stdoutput) / 8;
8514 }
8515
8516 /* MD interface: Symbol and relocation handling. */
8517
8518 /* Return the address within the segment that a PC-relative fixup is
8519 relative to. For AArch64 PC-relative fixups applied to instructions
8520 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8521
8522 long
8523 md_pcrel_from_section (fixS * fixP, segT seg)
8524 {
8525 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8526
8527 /* If this is pc-relative and we are going to emit a relocation
8528 then we just want to put out any pipeline compensation that the linker
8529 will need. Otherwise we want to use the calculated base. */
8530 if (fixP->fx_pcrel
8531 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8532 || aarch64_force_relocation (fixP)))
8533 base = 0;
8534
8535 /* AArch64 should be consistent for all pc-relative relocations. */
8536 return base + AARCH64_PCREL_OFFSET;
8537 }
8538
8539 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
8540 Otherwise we have no need to default values of symbols. */
8541
8542 symbolS *
8543 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
8544 {
8545 #ifdef OBJ_ELF
8546 if (name[0] == '_' && name[1] == 'G'
8547 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
8548 {
8549 if (!GOT_symbol)
8550 {
8551 if (symbol_find (name))
8552 as_bad (_("GOT already in the symbol table"));
8553
8554 GOT_symbol = symbol_new (name, undefined_section,
8555 &zero_address_frag, 0);
8556 }
8557
8558 return GOT_symbol;
8559 }
8560 #endif
8561
8562 return 0;
8563 }
8564
8565 /* Return non-zero if the indicated VALUE has overflowed the maximum
8566 range expressible by a unsigned number with the indicated number of
8567 BITS. */
8568
8569 static bool
8570 unsigned_overflow (valueT value, unsigned bits)
8571 {
8572 valueT lim;
8573 if (bits >= sizeof (valueT) * 8)
8574 return false;
8575 lim = (valueT) 1 << bits;
8576 return (value >= lim);
8577 }
8578
8579
8580 /* Return non-zero if the indicated VALUE has overflowed the maximum
8581 range expressible by an signed number with the indicated number of
8582 BITS. */
8583
8584 static bool
8585 signed_overflow (offsetT value, unsigned bits)
8586 {
8587 offsetT lim;
8588 if (bits >= sizeof (offsetT) * 8)
8589 return false;
8590 lim = (offsetT) 1 << (bits - 1);
8591 return (value < -lim || value >= lim);
8592 }
8593
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset op to its unscaled counterpart; OP_NIL marks
     the ops with no such counterpart.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  /* NOTE(review): the wording of this trace looks reversed (the conversion
     performed is LDR/STR -> LDUR/STUR); debug-only, so harmless.  */
  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure leaves *INSTR with the
     unscaled opcode installed but reports FALSE to the caller.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8656
8657 /* Called by fix_insn to fix a MOV immediate alias instruction.
8658
8659 Operand for a generic move immediate instruction, which is an alias
8660 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
8661 a 32-bit/64-bit immediate value into general register. An assembler error
8662 shall result if the immediate cannot be created by a single one of these
8663 instructions. If there is a choice, then to ensure reversability an
8664 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
8665
8666 static void
8667 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
8668 {
8669 const aarch64_opcode *opcode;
8670
8671 /* Need to check if the destination is SP/ZR. The check has to be done
8672 before any aarch64_replace_opcode. */
8673 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
8674 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
8675
8676 instr->operands[1].imm.value = value;
8677 instr->operands[1].skip = 0;
8678
8679 if (try_mov_wide_p)
8680 {
8681 /* Try the MOVZ alias. */
8682 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
8683 aarch64_replace_opcode (instr, opcode);
8684 if (aarch64_opcode_encode (instr->opcode, instr,
8685 &instr->value, NULL, NULL, insn_sequence))
8686 {
8687 put_aarch64_insn (buf, instr->value);
8688 return;
8689 }
8690 /* Try the MOVK alias. */
8691 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
8692 aarch64_replace_opcode (instr, opcode);
8693 if (aarch64_opcode_encode (instr->opcode, instr,
8694 &instr->value, NULL, NULL, insn_sequence))
8695 {
8696 put_aarch64_insn (buf, instr->value);
8697 return;
8698 }
8699 }
8700
8701 if (try_mov_bitmask_p)
8702 {
8703 /* Try the ORR alias. */
8704 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
8705 aarch64_replace_opcode (instr, opcode);
8706 if (aarch64_opcode_encode (instr->opcode, instr,
8707 &instr->value, NULL, NULL, insn_sequence))
8708 {
8709 put_aarch64_insn (buf, instr->value);
8710 return;
8711 }
8712 }
8713
8714 as_bad_where (fixP->fx_file, fixP->fx_line,
8715 _("immediate cannot be moved by a single instruction"));
8716 }
8717
8718 /* An instruction operand which is immediate related may have symbol used
8719 in the assembly, e.g.
8720
8721 mov w0, u32
8722 .set u32, 0x00ffff00
8723
8724 At the time when the assembly instruction is parsed, a referenced symbol,
8725 like 'u32' in the above example may not have been seen; a fixS is created
8726 in such a case and is handled here after symbols have been resolved.
8727 Instruction is fixed up with VALUE using the information in *FIXP plus
8728 extra information in FLAGS.
8729
8730 This function is called by md_apply_fix to fix up instructions that need
8731 a fix-up described above but does not involve any linker-time relocation. */
8732
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate of SVC/HVC/SMC/BRK etc.; or-ed directly into
	 the instruction word (via the SVC field encoder for exceptions).  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6  Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6  Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6  Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6  Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2   221111111111
		  1  098|7654|3 2   109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only legal when
	     the low 12 bits are zero and the result fits in 12 bits.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Validity cannot be checked by simple range
	 tests; go through the full operand encoder instead.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate (MOVZ/MOVN/MOVK half-word).  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  If a scaled LDR/STR cannot take the offset, fall
	 back to the programmer-friendly unscaled LDUR/STUR form before
	 reporting an error.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8896
8897 /* Apply a fixup (fixP) to segment data, once it has been determined
8898 by our caller that we have all the info we need to fix it up.
8899
8900 Parameter valP is the pointer to the value of the bits. */
8901
void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  A fixup with no
     symbol and no pc-relative adjustment that is not forced out can be
     fully resolved here.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* LDR (literal): 19-bit word-aligned pc-relative offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* ADR: 21-bit signed byte offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* B.cond: 19-bit word-aligned offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* TBZ/TBNZ: 14-bit word-aligned offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* BL/B: 26-bit word-aligned offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVW group: SCALE selects which 16-bit chunk of the value the
       instruction receives; all variants share the movw_common tail.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Narrow the generic reloc to the 32- or 64-bit ABI variant.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Narrow the generic reloc to the 32- or 64-bit ABI variant.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* All TLS relocations: mark the symbol thread-local and leave the
       fixup for the linker.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      /* Marker relocations for TLS descriptor sequences; nothing to do.  */
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    case BFD_RELOC_RVA:
    case BFD_RELOC_32_SECREL:
    case BFD_RELOC_16_SECIDX:
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated the struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually use
     this field, so the impact on the performance should be minimal .  */
  free (fixP->tc_fix_data.inst);

  return;
}
9289
9290 /* Translate internal representation of relocation info to BFD target
9291 format. */
9292
9293 arelent *
9294 tc_gen_reloc (asection * section, fixS * fixp)
9295 {
9296 arelent *reloc;
9297 bfd_reloc_code_real_type code;
9298
9299 reloc = XNEW (arelent);
9300
9301 reloc->sym_ptr_ptr = XNEW (asymbol *);
9302 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9303 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9304
9305 if (fixp->fx_pcrel)
9306 {
9307 if (section->use_rela_p)
9308 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9309 else
9310 fixp->fx_offset = reloc->address;
9311 }
9312 reloc->addend = fixp->fx_offset;
9313
9314 code = fixp->fx_r_type;
9315 switch (code)
9316 {
9317 case BFD_RELOC_16:
9318 if (fixp->fx_pcrel)
9319 code = BFD_RELOC_16_PCREL;
9320 break;
9321
9322 case BFD_RELOC_32:
9323 if (fixp->fx_pcrel)
9324 code = BFD_RELOC_32_PCREL;
9325 break;
9326
9327 case BFD_RELOC_64:
9328 if (fixp->fx_pcrel)
9329 code = BFD_RELOC_64_PCREL;
9330 break;
9331
9332 default:
9333 break;
9334 }
9335
9336 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9337 if (reloc->howto == NULL)
9338 {
9339 as_bad_where (fixp->fx_file, fixp->fx_line,
9340 _
9341 ("cannot represent %s relocation in this object file format"),
9342 bfd_get_reloc_code_name (code));
9343 return NULL;
9344 }
9345
9346 return reloc;
9347 }
9348
9349 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9350
void
cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
{
  bfd_reloc_code_real_type type;
  /* Data relocations emitted by .byte/.word etc. are never pc-relative.  */
  int pcrel = 0;

#ifdef TE_PE
  /* PE-specific expression operators; note the unbalanced braces: the
     closing brace of this else-block is inside the #ifdef below.  */
  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_32_SECREL;
    }
  else if (exp->X_op == O_secidx)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_16_SECIDX;
    }
  else
    {
#endif
  /* Pick a reloc.
     FIXME: @@ Should look at CPU word size.  */
  switch (size)
    {
    case 1:
      type = BFD_RELOC_8;
      break;
    case 2:
      type = BFD_RELOC_16;
      break;
    case 4:
      type = BFD_RELOC_32;
      break;
    case 8:
      type = BFD_RELOC_64;
      break;
    default:
      as_bad (_("cannot do %u-byte relocation"), size);
      type = BFD_RELOC_UNUSED;
      break;
    }
#ifdef TE_PE
    }
#endif

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}
9398
9399 /* Implement md_after_parse_args. This is the earliest time we need to decide
9400 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9401
9402 void
9403 aarch64_after_parse_args (void)
9404 {
9405 if (aarch64_abi != AARCH64_ABI_NONE)
9406 return;
9407
9408 #ifdef OBJ_ELF
9409 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9410 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9411 aarch64_abi = AARCH64_ABI_ILP32;
9412 else
9413 aarch64_abi = AARCH64_ABI_LP64;
9414 #else
9415 aarch64_abi = AARCH64_ABI_LLP64;
9416 #endif
9417 }
9418
9419 #ifdef OBJ_ELF
9420 const char *
9421 elf64_aarch64_target_format (void)
9422 {
9423 #ifdef TE_CLOUDABI
9424 /* FIXME: What to do for ilp32_p ? */
9425 if (target_big_endian)
9426 return "elf64-bigaarch64-cloudabi";
9427 else
9428 return "elf64-littleaarch64-cloudabi";
9429 #else
9430 if (target_big_endian)
9431 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9432 else
9433 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9434 #endif
9435 }
9436
/* Target hook for symbol frobbing; simply defers to the generic ELF
   handling.  PUNTP is set by the callee if the symbol should be
   punted.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9442 #elif defined OBJ_COFF
/* Return the BFD target name for COFF/PE output; only little-endian
   is supported.  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9448 #endif
9449
9450 /* MD interface: Finalization. */
9451
9452 /* A good place to do this, although this was probably not intended
9453 for this kind of use. We need to dump the literal pool before
9454 references are made to a null symbol pointer. */
9455
9456 void
9457 aarch64_cleanup (void)
9458 {
9459 literal_pool *pool;
9460
9461 for (pool = list_of_pools; pool; pool = pool->next)
9462 {
9463 /* Put it at the end of the relevant section. */
9464 subseg_set (pool->section, pool->sub_section);
9465 s_ltorg (0);
9466 }
9467 }
9468
9469 #ifdef OBJ_ELF
9470 /* Remove any excess mapping symbols generated for alignment frags in
9471 SEC. We may have created a mapping symbol before a zero byte
9472 alignment; remove it if there's a mapping symbol after the
9473 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag in the section, looking at the last mapping symbol
     recorded for each.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* The symbol sits exactly on the boundary to the next frag; scan
	 forward over empty frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9533 #endif
9534
9535 /* Adjust the symbol table. */
9536
/* Adjust the symbol table.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9547
/* Insert VALUE under KEY into TABLE without replacing an existing
   entry (final argument 0 = no replace).  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9553
/* Like checked_hash_insert, but for system-register names: assert that
   KEY fits within the fixed-size buffers used for sysreg name
   handling.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9560
9561 static void
9562 fill_instruction_hash_table (void)
9563 {
9564 const aarch64_opcode *opcode = aarch64_opcode_table;
9565
9566 while (opcode->name != NULL)
9567 {
9568 templates *templ, *new_templ;
9569 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9570
9571 new_templ = XNEW (templates);
9572 new_templ->opcode = opcode;
9573 new_templ->next = NULL;
9574
9575 if (!templ)
9576 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9577 else
9578 {
9579 new_templ->next = templ->next;
9580 templ->next = new_templ;
9581 }
9582 ++opcode;
9583 }
9584 }
9585
/* Copy at most NUM characters from SRC to DST, converting each to
   upper case with the locale-independent TOUPPER, and NUL-terminate
   DST.  Stops early at the end of SRC.  DST must have room for
   NUM + 1 bytes.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  /* Use size_t for the counter: the old 'unsigned int' could not cover
     the full range of NUM on LP64 hosts.  */
  size_t i;
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
9594
9595 /* Assume STR point to a lower-case string, allocate, convert and return
9596 the corresponding upper-case string. */
9597 static inline const char*
9598 get_upper_str (const char *str)
9599 {
9600 char *ret;
9601 size_t len = strlen (str);
9602 ret = XNEWVEC (char, len + 1);
9603 convert_to_upper (ret, str, len);
9604 return ret;
9605 }
9606
/* MD interface: Initialization.

   Called once before assembly begins: create and fill every lookup
   table used by the parser, resolve the CPU variant from the command
   line, and record the BFD machine type.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables mapping mnemonics, condition codes,
     shift/extend modifiers, system registers and other operand
     keywords to their table entries.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and related namespaces (IC/DC/AT/TLBI/SR ops,
     PSTATE fields) are keyed by name; sysreg_hash_insert checks the
     name-length invariant.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend modifiers and condition codes are matched
     case-insensitively by hashing both spellings.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options: -mcpu
     takes precedence over -march, and the build default is the
     fallback.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if(ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#ifdef OBJ_ELF
  /* DWARF register numbers used by SFrame for SP, FP and RA.
     FIXME - is there a better way to do it ?  */
  aarch64_sframe_cfa_sp_reg = 31;
  aarch64_sframe_cfa_fp_reg = 29; /* x29.  */
  aarch64_sframe_cfa_ra_reg = 30;
#endif
}
9778
/* Command line processing.  */

/* All target-specific options start with -m; everything else is
   dispatched through md_parse_option.  */
const char *md_shortopts = "m:";

/* -EB/-EL are only offered for the endiannesses the target supports.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9805
/* Table entry for a simple boolean -m option: matching the option sets
   *VAR to VALUE.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9828
/* Table entry mapping a -mcpu= name to its implied feature set.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9997
/* Table entry mapping a -march= name to its base feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
10024
/* ISA extensions.  Each entry names a "+ext" modifier, the feature
   bits it turns on, and the features it depends on (used by the
   enable/disable closure functions below).  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  /* "sme-f64" and "sme-i64" are kept as aliases of the canonical
     "sme-f64f64" / "sme-i16i64" spellings.  */
  {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-f64f64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i16i64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {"mops",		AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
			AARCH64_ARCH_NONE},
  {"hbc",		AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
			AARCH64_ARCH_NONE},
  {"cssc",		AARCH64_FEATURE (AARCH64_FEATURE_CSSC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
10133
/* Table entry for a -m option that carries a sub-argument ("-mcpu=...",
   "-march=...", "-mabi=..."), dispatched to FUNC.  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
10141
10142 /* Transitive closure of features depending on set. */
10143 static aarch64_feature_set
10144 aarch64_feature_disable_set (aarch64_feature_set set)
10145 {
10146 const struct aarch64_option_cpu_value_table *opt;
10147 aarch64_feature_set prev = 0;
10148
10149 while (prev != set) {
10150 prev = set;
10151 for (opt = aarch64_features; opt->name != NULL; opt++)
10152 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10153 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10154 }
10155 return set;
10156 }
10157
10158 /* Transitive closure of dependencies of set. */
10159 static aarch64_feature_set
10160 aarch64_feature_enable_set (aarch64_feature_set set)
10161 {
10162 const struct aarch64_option_cpu_value_table *opt;
10163 aarch64_feature_set prev = 0;
10164
10165 while (prev != set) {
10166 prev = set;
10167 for (opt = aarch64_features; opt->name != NULL; opt++)
10168 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10169 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10170 }
10171 return set;
10172 }
10173
10174 static int
10175 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10176 bool ext_only)
10177 {
10178 /* We insist on extensions being added before being removed. We achieve
10179 this by using the ADDING_VALUE variable to indicate whether we are
10180 adding an extension (1) or removing it (0) and only allowing it to
10181 change in the order -1 -> 1 -> 0. */
10182 int adding_value = -1;
10183 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10184
10185 /* Copy the feature set, so that we can modify it. */
10186 *ext_set = **opt_p;
10187 *opt_p = ext_set;
10188
10189 while (str != NULL && *str != 0)
10190 {
10191 const struct aarch64_option_cpu_value_table *opt;
10192 const char *ext = NULL;
10193 int optlen;
10194
10195 if (!ext_only)
10196 {
10197 if (*str != '+')
10198 {
10199 as_bad (_("invalid architectural extension"));
10200 return 0;
10201 }
10202
10203 ext = strchr (++str, '+');
10204 }
10205
10206 if (ext != NULL)
10207 optlen = ext - str;
10208 else
10209 optlen = strlen (str);
10210
10211 if (optlen >= 2 && startswith (str, "no"))
10212 {
10213 if (adding_value != 0)
10214 adding_value = 0;
10215 optlen -= 2;
10216 str += 2;
10217 }
10218 else if (optlen > 0)
10219 {
10220 if (adding_value == -1)
10221 adding_value = 1;
10222 else if (adding_value != 1)
10223 {
10224 as_bad (_("must specify extensions to add before specifying "
10225 "those to remove"));
10226 return false;
10227 }
10228 }
10229
10230 if (optlen == 0)
10231 {
10232 as_bad (_("missing architectural extension"));
10233 return 0;
10234 }
10235
10236 gas_assert (adding_value != -1);
10237
10238 for (opt = aarch64_features; opt->name != NULL; opt++)
10239 if (strncmp (opt->name, str, optlen) == 0)
10240 {
10241 aarch64_feature_set set;
10242
10243 /* Add or remove the extension. */
10244 if (adding_value)
10245 {
10246 set = aarch64_feature_enable_set (opt->value);
10247 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10248 }
10249 else
10250 {
10251 set = aarch64_feature_disable_set (opt->value);
10252 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10253 }
10254 break;
10255 }
10256
10257 if (opt->name == NULL)
10258 {
10259 as_bad (_("unknown architectural extension `%s'"), str);
10260 return 0;
10261 }
10262
10263 str = ext;
10264 };
10265
10266 return 1;
10267 }
10268
10269 static int
10270 aarch64_parse_cpu (const char *str)
10271 {
10272 const struct aarch64_cpu_option_table *opt;
10273 const char *ext = strchr (str, '+');
10274 size_t optlen;
10275
10276 if (ext != NULL)
10277 optlen = ext - str;
10278 else
10279 optlen = strlen (str);
10280
10281 if (optlen == 0)
10282 {
10283 as_bad (_("missing cpu name `%s'"), str);
10284 return 0;
10285 }
10286
10287 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10288 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10289 {
10290 mcpu_cpu_opt = &opt->value;
10291 if (ext != NULL)
10292 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10293
10294 return 1;
10295 }
10296
10297 as_bad (_("unknown cpu `%s'"), str);
10298 return 0;
10299 }
10300
10301 static int
10302 aarch64_parse_arch (const char *str)
10303 {
10304 const struct aarch64_arch_option_table *opt;
10305 const char *ext = strchr (str, '+');
10306 size_t optlen;
10307
10308 if (ext != NULL)
10309 optlen = ext - str;
10310 else
10311 optlen = strlen (str);
10312
10313 if (optlen == 0)
10314 {
10315 as_bad (_("missing architecture name `%s'"), str);
10316 return 0;
10317 }
10318
10319 for (opt = aarch64_archs; opt->name != NULL; opt++)
10320 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10321 {
10322 march_cpu_opt = &opt->value;
10323 if (ext != NULL)
10324 return aarch64_parse_features (ext, &march_cpu_opt, false);
10325
10326 return 1;
10327 }
10328
10329 as_bad (_("unknown architecture `%s'\n"), str);
10330 return 0;
10331 }
10332
/* ABIs.  Which names are accepted depends on the object format:
   ELF offers ilp32/lp64, other formats (COFF) offer llp64.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10348
10349 static int
10350 aarch64_parse_abi (const char *str)
10351 {
10352 unsigned int i;
10353
10354 if (str[0] == '\0')
10355 {
10356 as_bad (_("missing abi name `%s'"), str);
10357 return 0;
10358 }
10359
10360 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10361 if (strcmp (str, aarch64_abis[i].name) == 0)
10362 {
10363 aarch64_abi = aarch64_abis[i].value;
10364 return 1;
10365 }
10366
10367 as_bad (_("unknown abi `%s'\n"), str);
10368 return 0;
10369 }
10370
/* The -m options that take a sub-argument, wired to their parsers.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10380
/* Handle target-specific command-line option C (with argument ARG, or
   NULL).  Return 1 if the option was recognized and consumed, 0
   otherwise so the generic option machinery can report it.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple boolean -m options...  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* ...then the -mxxx= options that carry a sub-argument.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10447
10448 void
10449 md_show_usage (FILE * fp)
10450 {
10451 struct aarch64_option_table *opt;
10452 struct aarch64_long_option_table *lopt;
10453
10454 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10455
10456 for (opt = aarch64_opts; opt->option != NULL; opt++)
10457 if (opt->help != NULL)
10458 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10459
10460 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10461 if (lopt->help != NULL)
10462 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10463
10464 #ifdef OPTION_EB
10465 fprintf (fp, _("\
10466 -EB assemble code for a big-endian cpu\n"));
10467 #endif
10468
10469 #ifdef OPTION_EL
10470 fprintf (fp, _("\
10471 -EL assemble code for a little-endian cpu\n"));
10472 #endif
10473 }
10474
10475 /* Parse a .cpu directive. */
10476
10477 static void
10478 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10479 {
10480 const struct aarch64_cpu_option_table *opt;
10481 char saved_char;
10482 char *name;
10483 char *ext;
10484 size_t optlen;
10485
10486 name = input_line_pointer;
10487 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10488 saved_char = *input_line_pointer;
10489 *input_line_pointer = 0;
10490
10491 ext = strchr (name, '+');
10492
10493 if (ext != NULL)
10494 optlen = ext - name;
10495 else
10496 optlen = strlen (name);
10497
10498 /* Skip the first "all" entry. */
10499 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10500 if (strlen (opt->name) == optlen
10501 && strncmp (name, opt->name, optlen) == 0)
10502 {
10503 mcpu_cpu_opt = &opt->value;
10504 if (ext != NULL)
10505 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10506 return;
10507
10508 cpu_variant = *mcpu_cpu_opt;
10509
10510 *input_line_pointer = saved_char;
10511 demand_empty_rest_of_line ();
10512 return;
10513 }
10514 as_bad (_("unknown cpu `%s'"), name);
10515 *input_line_pointer = saved_char;
10516 ignore_rest_of_line ();
10517 }
10518
10519
10520 /* Parse a .arch directive. */
10521
10522 static void
10523 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10524 {
10525 const struct aarch64_arch_option_table *opt;
10526 char saved_char;
10527 char *name;
10528 char *ext;
10529 size_t optlen;
10530
10531 name = input_line_pointer;
10532 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10533 saved_char = *input_line_pointer;
10534 *input_line_pointer = 0;
10535
10536 ext = strchr (name, '+');
10537
10538 if (ext != NULL)
10539 optlen = ext - name;
10540 else
10541 optlen = strlen (name);
10542
10543 /* Skip the first "all" entry. */
10544 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10545 if (strlen (opt->name) == optlen
10546 && strncmp (name, opt->name, optlen) == 0)
10547 {
10548 mcpu_cpu_opt = &opt->value;
10549 if (ext != NULL)
10550 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10551 return;
10552
10553 cpu_variant = *mcpu_cpu_opt;
10554
10555 *input_line_pointer = saved_char;
10556 demand_empty_rest_of_line ();
10557 return;
10558 }
10559
10560 as_bad (_("unknown architecture `%s'\n"), name);
10561 *input_line_pointer = saved_char;
10562 ignore_rest_of_line ();
10563 }
10564
10565 /* Parse a .arch_extension directive. */
10566
10567 static void
10568 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10569 {
10570 char saved_char;
10571 char *ext = input_line_pointer;
10572
10573 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10574 saved_char = *input_line_pointer;
10575 *input_line_pointer = 0;
10576
10577 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10578 return;
10579
10580 cpu_variant = *mcpu_cpu_opt;
10581
10582 *input_line_pointer = saved_char;
10583 demand_empty_rest_of_line ();
10584 }
10585
/* Copy symbol information: propagate the AArch64-specific symbol flag
   word from SRC to DEST (used when one symbol is defined in terms of
   another).  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10593
#ifdef OBJ_ELF
/* Same as elf_copy_symbol_attributes, but without copying st_other.
   This is needed so AArch64 specific st_other values can be independently
   specified for an IFUNC resolver (that is called by the dynamic linker)
   and the symbol it resolves (aliased to the resolver).  In particular,
   if a function symbol has special st_other value set via directives,
   then attaching an IFUNC resolver to that symbol should not override
   the st_other setting.  Requiring the directive on the IFUNC resolver
   symbol would be unexpected and problematic in C code, where the two
   symbols appear as two independent function declarations.  */

void
aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  struct elf_obj_sy *srcelf = symbol_get_obj (src);
  struct elf_obj_sy *destelf = symbol_get_obj (dest);
  /* If size is unset, copy size from src.  Because we don't track whether
     .size has been used, we can't differentiate .size dest, 0 from the case
     where dest's size is unset.  */
  if (!destelf->size && S_GET_SIZE (dest) == 0)
    {
      if (srcelf->size)
	{
	  /* Deep-copy the size expression so DEST owns its own copy.  */
	  destelf->size = XNEW (expressionS);
	  *destelf->size = *srcelf->size;
	}
      S_SET_SIZE (dest, S_GET_SIZE (src));
    }
}
#endif