]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
aarch64: Add a aarch64_cpu_supports_inst_p helper
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 /* Types of processor to assemble for. */
42 #ifndef CPU_DEFAULT
43 #define CPU_DEFAULT AARCH64_ARCH_V8
44 #endif
45
46 #define streq(a, b) (strcmp (a, b) == 0)
47
48 #define END_OF_INSN '\0'
49
50 static aarch64_feature_set cpu_variant;
51
52 /* Variables that we set while parsing command-line options. Once all
53 options have been read we re-process these values to set the real
54 assembly flags. */
55 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
56 static const aarch64_feature_set *march_cpu_opt = NULL;
57
58 /* Constants for known architecture features. */
59 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
60
61 /* Currently active instruction sequence. */
62 static aarch64_instr_sequence *insn_sequence = NULL;
63
64 #ifdef OBJ_ELF
65 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
66 static symbolS *GOT_symbol;
67 #endif
68
69 /* Which ABI to use. */
70 enum aarch64_abi_type
71 {
72 AARCH64_ABI_NONE = 0,
73 AARCH64_ABI_LP64 = 1,
74 AARCH64_ABI_ILP32 = 2,
75 AARCH64_ABI_LLP64 = 3
76 };
77
78 unsigned int aarch64_sframe_cfa_sp_reg;
79 /* The other CFA base register for SFrame stack trace info. */
80 unsigned int aarch64_sframe_cfa_fp_reg;
81 unsigned int aarch64_sframe_cfa_ra_reg;
82
83 #ifndef DEFAULT_ARCH
84 #define DEFAULT_ARCH "aarch64"
85 #endif
86
87 #ifdef OBJ_ELF
88 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
89 static const char *default_arch = DEFAULT_ARCH;
90 #endif
91
92 /* AArch64 ABI for the output file. */
93 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
94
95 /* When non-zero, program to a 32-bit model, in which the C data types
96 int, long and all pointer types are 32-bit objects (ILP32); or to a
97 64-bit model, in which the C int type is 32-bits but the C long type
98 and all pointer types are 64-bit objects (LP64). */
99 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
100
101 /* When non zero, C types int and long are 32 bit,
102 pointers, however are 64 bit */
103 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
104
105 enum vector_el_type
106 {
107 NT_invtype = -1,
108 NT_b,
109 NT_h,
110 NT_s,
111 NT_d,
112 NT_q,
113 NT_zero,
114 NT_merge
115 };
116
117 /* Bits for DEFINED field in vector_type_el. */
118 #define NTA_HASTYPE 1
119 #define NTA_HASINDEX 2
120 #define NTA_HASVARWIDTH 4
121
122 struct vector_type_el
123 {
124 enum vector_el_type type;
125 unsigned char defined;
126 unsigned element_size;
127 unsigned width;
128 int64_t index;
129 };
130
131 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
132
133 struct reloc
134 {
135 bfd_reloc_code_real_type type;
136 expressionS exp;
137 int pc_rel;
138 enum aarch64_opnd opnd;
139 uint32_t flags;
140 unsigned need_libopcodes_p : 1;
141 };
142
143 struct aarch64_instruction
144 {
145 /* libopcodes structure for instruction intermediate representation. */
146 aarch64_inst base;
147 /* Record assembly errors found during the parsing. */
148 aarch64_operand_error parsing_error;
149 /* The condition that appears in the assembly line. */
150 int cond;
151 /* Relocation information (including the GAS internal fixup). */
152 struct reloc reloc;
153 /* Need to generate an immediate in the literal pool. */
154 unsigned gen_lit_pool : 1;
155 };
156
157 typedef struct aarch64_instruction aarch64_instruction;
158
159 static aarch64_instruction inst;
160
161 static bool parse_operands (char *, const aarch64_opcode *);
162 static bool programmer_friendly_fixup (aarch64_instruction *);
163
164 /* If an AARCH64_OPDE_SYNTAX_ERROR has no error string, its first three
165 data fields contain the following information:
166
167 data[0].i:
168 A mask of register types that would have been acceptable as bare
169 operands, outside of a register list. In addition, SEF_DEFAULT_ERROR
170    is set if a general parsing error occurred for an operand (that is,
171 an error not related to registers, and having no error string).
172
173 data[1].i:
174 A mask of register types that would have been acceptable inside
175 a register list. In addition, SEF_IN_REGLIST is set if the
176 operand contained a '{' and if we got to the point of trying
177 to parse a register inside a list.
178
179 data[2].i:
180 The mask associated with the register that was actually seen, or 0
181 if none. A nonzero value describes a register inside a register
182 list if data[1].i & SEF_IN_REGLIST, otherwise it describes a bare
183 register.
184
185 The idea is that stringless errors from multiple opcode templates can
186 be ORed together to give a summary of the available alternatives. */
187 #define SEF_DEFAULT_ERROR (1U << 31)
188 #define SEF_IN_REGLIST (1U << 31)
189
190 /* Diagnostics inline function utilities.
191
192 These are lightweight utilities which should only be called by parse_operands
193 and other parsers. GAS processes each assembly line by parsing it against
194 instruction template(s), in the case of multiple templates (for the same
195 mnemonic name), those templates are tried one by one until one succeeds or
196 all fail. An assembly line may fail a few templates before being
197 successfully parsed; an error saved here in most cases is not a user error
198 but an error indicating the current template is not the right template.
199 Therefore it is very important that errors can be saved at a low cost during
200 the parsing; we don't want to slow down the whole parsing by recording
201 non-user errors in detail.
202
203 Remember that the objective is to help GAS pick up the most appropriate
204 error message in the case of multiple templates, e.g. FMOV which has 8
205 templates. */
206
207 static inline void
208 clear_error (void)
209 {
210 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
211 inst.parsing_error.kind = AARCH64_OPDE_NIL;
212 }
213
214 static inline bool
215 error_p (void)
216 {
217 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
218 }
219
220 static inline void
221 set_error (enum aarch64_operand_error_kind kind, const char *error)
222 {
223 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
224 inst.parsing_error.index = -1;
225 inst.parsing_error.kind = kind;
226 inst.parsing_error.error = error;
227 }
228
229 static inline void
230 set_recoverable_error (const char *error)
231 {
232 set_error (AARCH64_OPDE_RECOVERABLE, error);
233 }
234
235 /* Use the DESC field of the corresponding aarch64_operand entry to compose
236 the error message. */
237 static inline void
238 set_default_error (void)
239 {
240 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
241 inst.parsing_error.data[0].i = SEF_DEFAULT_ERROR;
242 }
243
244 static inline void
245 set_expected_error (unsigned int flags)
246 {
247 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
248 inst.parsing_error.data[0].i = flags;
249 }
250
251 static inline void
252 set_syntax_error (const char *error)
253 {
254 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
255 }
256
257 static inline void
258 set_first_syntax_error (const char *error)
259 {
260 if (! error_p ())
261 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
262 }
263
264 static inline void
265 set_fatal_syntax_error (const char *error)
266 {
267 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
268 }
269 \f
270 /* Return value for certain parsers when the parsing fails; those parsers
271 return the information of the parsed result, e.g. register number, on
272 success. */
273 #define PARSE_FAIL -1
274
275 /* This is an invalid condition code that means no conditional field is
276 present. */
277 #define COND_ALWAYS 0x10
278
279 typedef struct
280 {
281 const char *template;
282 uint32_t value;
283 } asm_nzcv;
284
285 struct reloc_entry
286 {
287 char *name;
288 bfd_reloc_code_real_type reloc;
289 };
290
291 /* Macros to define the register types and masks for the purpose
292 of parsing. */
293
294 #undef AARCH64_REG_TYPES
295 #define AARCH64_REG_TYPES \
296 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
297 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
298 BASIC_REG_TYPE(SP_32) /* wsp */ \
299 BASIC_REG_TYPE(SP_64) /* sp */ \
300 BASIC_REG_TYPE(Z_32) /* wzr */ \
301 BASIC_REG_TYPE(Z_64) /* xzr */ \
302 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
303 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
304 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
305 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
306 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
307 BASIC_REG_TYPE(VN) /* v[0-31] */ \
308 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
309 BASIC_REG_TYPE(PN) /* p[0-15] */ \
310 BASIC_REG_TYPE(ZA) /* za */ \
311 BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
312 BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
313 BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
314 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
315 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
316 /* Typecheck: same, plus SVE registers. */ \
317 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
318 | REG_TYPE(ZN)) \
319 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
320 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
321 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
322 /* Typecheck: same, plus SVE registers. */ \
323 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
324 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
325 | REG_TYPE(ZN)) \
326 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
327 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
328 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
329 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
330 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
331 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
332 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
333 /* Typecheck: any [BHSDQ]P FP. */ \
334 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
335 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
336 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
337 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
338 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
339 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
340 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
341 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
342 be used for SVE instructions, since Zn and Pn are valid symbols \
343 in other contexts. */ \
344 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
345 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
346 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
347 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
348 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
349 | REG_TYPE(ZN) | REG_TYPE(PN)) \
350 /* Any integer register; used for error messages only. */ \
351 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
352 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
353 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
354 /* Any vector register. */ \
355 MULTI_REG_TYPE(VZ, REG_TYPE(VN) | REG_TYPE(ZN)) \
356 /* An SVE vector or predicate register. */ \
357 MULTI_REG_TYPE(ZP, REG_TYPE(ZN) | REG_TYPE(PN)) \
358 /* Any vector or predicate register. */ \
359 MULTI_REG_TYPE(VZP, REG_TYPE(VN) | REG_TYPE(ZN) | REG_TYPE(PN)) \
360 /* The whole of ZA or a single tile. */ \
361 MULTI_REG_TYPE(ZA_ZAT, REG_TYPE(ZA) | REG_TYPE(ZAT)) \
362 /* A horizontal or vertical slice of a ZA tile. */ \
363 MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV)) \
364 /* Pseudo type to mark the end of the enumerator sequence. */ \
365 END_REG_TYPE(MAX)
366
367 #undef BASIC_REG_TYPE
368 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
369 #undef MULTI_REG_TYPE
370 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
371 #undef END_REG_TYPE
372 #define END_REG_TYPE(T) BASIC_REG_TYPE(T)
373
374 /* Register type enumerators. */
375 typedef enum aarch64_reg_type_
376 {
377 /* A list of REG_TYPE_*. */
378 AARCH64_REG_TYPES
379 } aarch64_reg_type;
380
381 #undef BASIC_REG_TYPE
382 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
383 #undef REG_TYPE
384 #define REG_TYPE(T) (1 << REG_TYPE_##T)
385 #undef MULTI_REG_TYPE
386 #define MULTI_REG_TYPE(T,V) V,
387 #undef END_REG_TYPE
388 #define END_REG_TYPE(T) 0
389
390 /* Structure for a hash table entry for a register. */
391 typedef struct
392 {
393 const char *name;
394 unsigned char number;
395 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
396 unsigned char builtin;
397 } reg_entry;
398
399 /* Values indexed by aarch64_reg_type to assist the type checking. */
400 static const unsigned reg_type_masks[] =
401 {
402 AARCH64_REG_TYPES
403 };
404
405 #undef BASIC_REG_TYPE
406 #undef REG_TYPE
407 #undef MULTI_REG_TYPE
408 #undef END_REG_TYPE
409 #undef AARCH64_REG_TYPES
410
411 /* We expected one of the registers in MASK to be specified. If a register
412 of some kind was specified, SEEN is a mask that contains that register,
413 otherwise it is zero.
414
415 If it is possible to provide a relatively pithy message that describes
416 the error exactly, return a string that does so, reporting the error
417 against "operand %d". Return null otherwise.
418
419 From a QoI perspective, any REG_TYPE_* that is passed as the first
420 argument to set_expected_reg_error should generally have its own message.
421 Providing messages for combinations of such REG_TYPE_*s can be useful if
422 it is possible to summarize the combination in a relatively natural way.
423 On the other hand, it seems better to avoid long lists of unrelated
424 things. */
425
426 static const char *
427 get_reg_expected_msg (unsigned int mask, unsigned int seen)
428 {
429 /* First handle messages that use SEEN. */
430 if ((mask & reg_type_masks[REG_TYPE_ZAT])
431 && (seen & reg_type_masks[REG_TYPE_ZATHV]))
432 return N_("expected an unsuffixed ZA tile at operand %d");
433
434 if ((mask & reg_type_masks[REG_TYPE_ZATHV])
435 && (seen & reg_type_masks[REG_TYPE_ZAT]))
436 return N_("missing horizontal or vertical suffix at operand %d");
437
438 if ((mask & reg_type_masks[REG_TYPE_ZA])
439 && (seen & (reg_type_masks[REG_TYPE_ZAT]
440 | reg_type_masks[REG_TYPE_ZATHV])))
441 return N_("expected 'za' rather than a ZA tile at operand %d");
442
443 /* Integer, zero and stack registers. */
444 if (mask == reg_type_masks[REG_TYPE_R_64])
445 return N_("expected a 64-bit integer register at operand %d");
446 if (mask == reg_type_masks[REG_TYPE_R_Z])
447 return N_("expected an integer or zero register at operand %d");
448 if (mask == reg_type_masks[REG_TYPE_R_SP])
449 return N_("expected an integer or stack pointer register at operand %d");
450
451 /* Floating-point and SIMD registers. */
452 if (mask == reg_type_masks[REG_TYPE_BHSDQ])
453 return N_("expected a scalar SIMD or floating-point register"
454 " at operand %d");
455 if (mask == reg_type_masks[REG_TYPE_VN])
456 return N_("expected an Advanced SIMD vector register at operand %d");
457 if (mask == reg_type_masks[REG_TYPE_ZN])
458 return N_("expected an SVE vector register at operand %d");
459 if (mask == reg_type_masks[REG_TYPE_PN])
460 return N_("expected an SVE predicate register at operand %d");
461 if (mask == reg_type_masks[REG_TYPE_VZ])
462 return N_("expected a vector register at operand %d");
463 if (mask == reg_type_masks[REG_TYPE_ZP])
464 return N_("expected an SVE vector or predicate register at operand %d");
465 if (mask == reg_type_masks[REG_TYPE_VZP])
466 return N_("expected a vector or predicate register at operand %d");
467
468 /* ZA-related registers. */
469 if (mask == reg_type_masks[REG_TYPE_ZA])
470 return N_("expected a ZA array vector at operand %d");
471 if (mask == reg_type_masks[REG_TYPE_ZA_ZAT])
472 return N_("expected 'za' or a ZA tile at operand %d");
473 if (mask == reg_type_masks[REG_TYPE_ZAT])
474 return N_("expected a ZA tile at operand %d");
475 if (mask == reg_type_masks[REG_TYPE_ZATHV])
476 return N_("expected a ZA tile slice at operand %d");
477
478 /* Integer and vector combos. */
479 if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_VN]))
480 return N_("expected an integer register or Advanced SIMD vector register"
481 " at operand %d");
482 if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_ZN]))
483 return N_("expected an integer register or SVE vector register"
484 " at operand %d");
485 if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_VZ]))
486 return N_("expected an integer or vector register at operand %d");
487 if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_PN]))
488 return N_("expected an integer or predicate register at operand %d");
489 if (mask == (reg_type_masks[REG_TYPE_R_Z] | reg_type_masks[REG_TYPE_VZP]))
490 return N_("expected an integer, vector or predicate register"
491 " at operand %d");
492
493 /* SVE and SME combos. */
494 if (mask == (reg_type_masks[REG_TYPE_ZN] | reg_type_masks[REG_TYPE_ZATHV]))
495 return N_("expected an SVE vector register or ZA tile slice"
496 " at operand %d");
497
498 return NULL;
499 }
500
501 /* Record that we expected a register of type TYPE but didn't see one.
502 REG is the register that we actually saw, or null if we didn't see a
503 recognized register. FLAGS is SEF_IN_REGLIST if we are parsing the
504 contents of a register list, otherwise it is zero. */
505
506 static inline void
507 set_expected_reg_error (aarch64_reg_type type, const reg_entry *reg,
508 unsigned int flags)
509 {
510 assert (flags == 0 || flags == SEF_IN_REGLIST);
511 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
512 if (flags & SEF_IN_REGLIST)
513 inst.parsing_error.data[1].i = reg_type_masks[type] | flags;
514 else
515 inst.parsing_error.data[0].i = reg_type_masks[type];
516 if (reg)
517 inst.parsing_error.data[2].i = reg_type_masks[reg->type];
518 }
519
520 /* Record that we expected a register list containing registers of type TYPE,
521 but didn't see the opening '{'. If we saw a register instead, REG is the
522 register that we saw, otherwise it is null. */
523
524 static inline void
525 set_expected_reglist_error (aarch64_reg_type type, const reg_entry *reg)
526 {
527 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
528 inst.parsing_error.data[1].i = reg_type_masks[type];
529 if (reg)
530 inst.parsing_error.data[2].i = reg_type_masks[reg->type];
531 }
532
533 /* Some well known registers that we refer to directly elsewhere. */
534 #define REG_SP 31
535 #define REG_ZR 31
536
537 /* Instructions take 4 bytes in the object file. */
538 #define INSN_SIZE 4
539
540 static htab_t aarch64_ops_hsh;
541 static htab_t aarch64_cond_hsh;
542 static htab_t aarch64_shift_hsh;
543 static htab_t aarch64_sys_regs_hsh;
544 static htab_t aarch64_pstatefield_hsh;
545 static htab_t aarch64_sys_regs_ic_hsh;
546 static htab_t aarch64_sys_regs_dc_hsh;
547 static htab_t aarch64_sys_regs_at_hsh;
548 static htab_t aarch64_sys_regs_tlbi_hsh;
549 static htab_t aarch64_sys_regs_sr_hsh;
550 static htab_t aarch64_reg_hsh;
551 static htab_t aarch64_barrier_opt_hsh;
552 static htab_t aarch64_nzcv_hsh;
553 static htab_t aarch64_pldop_hsh;
554 static htab_t aarch64_hint_opt_hsh;
555
556 /* Stuff needed to resolve the label ambiguity
557 As:
558 ...
559 label: <insn>
560 may differ from:
561 ...
562 label:
563 <insn> */
564
565 static symbolS *last_label_seen;
566
567 /* Literal pool structure. Held on a per-section
568 and per-sub-section basis. */
569
570 #define MAX_LITERAL_POOL_SIZE 1024
571 typedef struct literal_expression
572 {
573 expressionS exp;
574 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
575 LITTLENUM_TYPE * bignum;
576 } literal_expression;
577
578 typedef struct literal_pool
579 {
580 literal_expression literals[MAX_LITERAL_POOL_SIZE];
581 unsigned int next_free_entry;
582 unsigned int id;
583 symbolS *symbol;
584 segT section;
585 subsegT sub_section;
586 int size;
587 struct literal_pool *next;
588 } literal_pool;
589
590 /* Pointer to a linked list of literal pools. */
591 static literal_pool *list_of_pools = NULL;
592 \f
593 /* Pure syntax. */
594
595 /* This array holds the chars that always start a comment. If the
596 pre-processor is disabled, these aren't very useful. */
597 const char comment_chars[] = "";
598
599 /* This array holds the chars that only start a comment at the beginning of
600 a line. If the line seems to have the form '# 123 filename'
601 .line and .file directives will appear in the pre-processed output. */
602 /* Note that input_file.c hand checks for '#' at the beginning of the
603 first line of the input file. This is because the compiler outputs
604 #NO_APP at the beginning of its output. */
605 /* Also note that comments like this one will always work. */
606 const char line_comment_chars[] = "#";
607
608 const char line_separator_chars[] = ";";
609
610 /* Chars that can be used to separate mant
611 from exp in floating point numbers. */
612 const char EXP_CHARS[] = "eE";
613
614 /* Chars that mean this number is a floating point constant. */
615 /* As in 0f12.456 */
616 /* or 0d1.2345e12 */
617
618 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
619
620 /* Prefix character that indicates the start of an immediate value. */
621 #define is_immediate_prefix(C) ((C) == '#')
622
623 /* Separator character handling. */
624
625 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
626
/* If *STR points at character C, advance *STR past it and return TRUE;
   otherwise leave *STR alone and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  (*str)++;
  return true;
}
638
639 #define skip_past_comma(str) skip_past_char (str, ',')
640
641 /* Arithmetic expressions (possibly involving symbols). */
642
643 static bool in_aarch64_get_expression = false;
644
645 /* Third argument to aarch64_get_expression. */
646 #define GE_NO_PREFIX false
647 #define GE_OPT_PREFIX true
648
649 /* Fourth argument to aarch64_get_expression. */
650 #define ALLOW_ABSENT false
651 #define REJECT_ABSENT true
652
653 /* Return TRUE if the string pointed by *STR is successfully parsed
654    as a valid expression; *EP will be filled with the information of
655 such an expression. Otherwise return FALSE.
656
657 If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
658    If REJECT_ABSENT is true then treat missing expressions as an error.  */
659
660 static bool
661 aarch64_get_expression (expressionS * ep,
662 char ** str,
663 bool allow_immediate_prefix,
664 bool reject_absent)
665 {
666 char *save_in;
667 segT seg;
668 bool prefix_present = false;
669
670 if (allow_immediate_prefix)
671 {
672 if (is_immediate_prefix (**str))
673 {
674 (*str)++;
675 prefix_present = true;
676 }
677 }
678
679 memset (ep, 0, sizeof (expressionS));
680
681 save_in = input_line_pointer;
682 input_line_pointer = *str;
683 in_aarch64_get_expression = true;
684 seg = expression (ep);
685 in_aarch64_get_expression = false;
686
687 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
688 {
689 /* We found a bad expression in md_operand(). */
690 *str = input_line_pointer;
691 input_line_pointer = save_in;
692 if (prefix_present && ! error_p ())
693 set_fatal_syntax_error (_("bad expression"));
694 else
695 set_first_syntax_error (_("bad expression"));
696 return false;
697 }
698
699 #ifdef OBJ_AOUT
700 if (seg != absolute_section
701 && seg != text_section
702 && seg != data_section
703 && seg != bss_section
704 && seg != undefined_section)
705 {
706 set_syntax_error (_("bad segment"));
707 *str = input_line_pointer;
708 input_line_pointer = save_in;
709 return false;
710 }
711 #else
712 (void) seg;
713 #endif
714
715 *str = input_line_pointer;
716 input_line_pointer = save_in;
717 return true;
718 }
719
720 /* Turn a string in input_line_pointer into a floating point constant
721 of type TYPE, and store the appropriate bytes in *LITP. The number
722 of LITTLENUMS emitted is stored in *SIZEP. An error message is
723 returned, or NULL on OK. */
724
725 const char *
726 md_atof (int type, char *litP, int *sizeP)
727 {
728 return ieee_md_atof (type, litP, sizeP, target_big_endian);
729 }
730
731 /* We handle all bad expressions here, so that we can report the faulty
732 instruction in the error message. */
733 void
734 md_operand (expressionS * exp)
735 {
736 if (in_aarch64_get_expression)
737 exp->X_op = O_illegal;
738 }
739
740 /* Immediate values. */
741
742 /* Errors may be set multiple times during parsing or bit encoding
743 (particularly in the Neon bits), but usually the earliest error which is set
744 will be the most meaningful. Avoid overwriting it with later (cascading)
745 errors by calling this function. */
746
/* Record ERROR as a syntax error unless an earlier error is already
   pending; the earliest error is usually the most meaningful.  */
static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
753
754 /* Similar to first_error, but this function accepts formatted error
755 message. */
756 static void
757 first_error_fmt (const char *format, ...)
758 {
759 va_list args;
760 enum
761 { size = 100 };
762 /* N.B. this single buffer will not cause error messages for different
763 instructions to pollute each other; this is because at the end of
764 processing of each assembly line, error message if any will be
765 collected by as_bad. */
766 static char buffer[size];
767
768 if (! error_p ())
769 {
770 int ret ATTRIBUTE_UNUSED;
771 va_start (args, format);
772 ret = vsnprintf (buffer, size, format, args);
773 know (ret <= size - 1 && ret >= 0);
774 va_end (args);
775 set_syntax_error (buffer);
776 }
777 }
778
779 /* Internal helper routine converting a vector_type_el structure *VECTYPE
780 to a corresponding operand qualifier. */
781
782 static inline aarch64_opnd_qualifier_t
783 vectype_to_qualifier (const struct vector_type_el *vectype)
784 {
785 /* Element size in bytes indexed by vector_el_type. */
786 const unsigned char ele_size[5]
787 = {1, 2, 4, 8, 16};
788 const unsigned int ele_base [5] =
789 {
790 AARCH64_OPND_QLF_V_4B,
791 AARCH64_OPND_QLF_V_2H,
792 AARCH64_OPND_QLF_V_2S,
793 AARCH64_OPND_QLF_V_1D,
794 AARCH64_OPND_QLF_V_1Q
795 };
796
797 if (!vectype->defined || vectype->type == NT_invtype)
798 goto vectype_conversion_fail;
799
800 if (vectype->type == NT_zero)
801 return AARCH64_OPND_QLF_P_Z;
802 if (vectype->type == NT_merge)
803 return AARCH64_OPND_QLF_P_M;
804
805 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
806
807 if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
808 {
809 /* Special case S_4B. */
810 if (vectype->type == NT_b && vectype->width == 4)
811 return AARCH64_OPND_QLF_S_4B;
812
813 /* Special case S_2H. */
814 if (vectype->type == NT_h && vectype->width == 2)
815 return AARCH64_OPND_QLF_S_2H;
816
817 /* Vector element register. */
818 return AARCH64_OPND_QLF_S_B + vectype->type;
819 }
820 else
821 {
822 /* Vector register. */
823 int reg_size = ele_size[vectype->type] * vectype->width;
824 unsigned offset;
825 unsigned shift;
826 if (reg_size != 16 && reg_size != 8 && reg_size != 4)
827 goto vectype_conversion_fail;
828
829 /* The conversion is by calculating the offset from the base operand
830 qualifier for the vector type. The operand qualifiers are regular
831 enough that the offset can established by shifting the vector width by
832 a vector-type dependent amount. */
833 shift = 0;
834 if (vectype->type == NT_b)
835 shift = 3;
836 else if (vectype->type == NT_h || vectype->type == NT_s)
837 shift = 2;
838 else if (vectype->type >= NT_d)
839 shift = 1;
840 else
841 gas_assert (0);
842
843 offset = ele_base [vectype->type] + (vectype->width >> shift);
844 gas_assert (AARCH64_OPND_QLF_V_4B <= offset
845 && offset <= AARCH64_OPND_QLF_V_1Q);
846 return offset;
847 }
848
849 vectype_conversion_fail:
850 first_error (_("bad vector arrangement type"));
851 return AARCH64_OPND_QLF_NIL;
852 }
853
854 /* Register parsing. */
855
856 /* Generic register parser which is called by other specialized
857 register parsers.
858 CCP points to what should be the beginning of a register name.
859 If it is indeed a valid register name, advance CCP over it and
860 return the reg_entry structure; otherwise return NULL.
861 It does not issue diagnostics. */
862
863 static reg_entry *
864 parse_reg (char **ccp)
865 {
866 char *start = *ccp;
867 char *p;
868 reg_entry *reg;
869
870 #ifdef REGISTER_PREFIX
871 if (*start != REGISTER_PREFIX)
872 return NULL;
873 start++;
874 #endif
875
876 p = start;
877 if (!ISALPHA (*p) || !is_name_beginner (*p))
878 return NULL;
879
880 do
881 p++;
882 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
883
884 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
885
886 if (!reg)
887 return NULL;
888
889 *ccp = p;
890 return reg;
891 }
892
893 /* Return the operand qualifier associated with all uses of REG, or
894 AARCH64_OPND_QLF_NIL if none. AARCH64_OPND_QLF_NIL means either
895 that qualifiers don't apply to REG or that qualifiers are added
896 using suffixes. */
897
898 static aarch64_opnd_qualifier_t
899 inherent_reg_qualifier (const reg_entry *reg)
900 {
901 switch (reg->type)
902 {
903 case REG_TYPE_R_32:
904 case REG_TYPE_SP_32:
905 case REG_TYPE_Z_32:
906 return AARCH64_OPND_QLF_W;
907
908 case REG_TYPE_R_64:
909 case REG_TYPE_SP_64:
910 case REG_TYPE_Z_64:
911 return AARCH64_OPND_QLF_X;
912
913 case REG_TYPE_FP_B:
914 case REG_TYPE_FP_H:
915 case REG_TYPE_FP_S:
916 case REG_TYPE_FP_D:
917 case REG_TYPE_FP_Q:
918 return AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);
919
920 default:
921 return AARCH64_OPND_QLF_NIL;
922 }
923 }
924
925 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
926 return FALSE. */
927 static bool
928 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
929 {
930 return (reg_type_masks[type] & (1 << reg->type)) != 0;
931 }
932
933 /* Try to parse a base or offset register. Allow SVE base and offset
934 registers if REG_TYPE includes SVE registers. Return the register
935 entry on success, setting *QUALIFIER to the register qualifier.
936 Return null otherwise.
937
938 Note that this function does not issue any diagnostics. */
939
940 static const reg_entry *
941 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
942 aarch64_opnd_qualifier_t *qualifier)
943 {
944 char *str = *ccp;
945 const reg_entry *reg = parse_reg (&str);
946
947 if (reg == NULL)
948 return NULL;
949
950 switch (reg->type)
951 {
952 case REG_TYPE_ZN:
953 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
954 || str[0] != '.')
955 return NULL;
956 switch (TOLOWER (str[1]))
957 {
958 case 's':
959 *qualifier = AARCH64_OPND_QLF_S_S;
960 break;
961 case 'd':
962 *qualifier = AARCH64_OPND_QLF_S_D;
963 break;
964 default:
965 return NULL;
966 }
967 str += 2;
968 break;
969
970 default:
971 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
972 return NULL;
973 *qualifier = inherent_reg_qualifier (reg);
974 break;
975 }
976
977 *ccp = str;
978
979 return reg;
980 }
981
982 /* Try to parse a base or offset register. Return the register entry
983 on success, setting *QUALIFIER to the register qualifier. Return null
984 otherwise.
985
986 Note that this function does not issue any diagnostics. */
987
988 static const reg_entry *
989 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
990 {
991 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
992 }
993
994 /* Parse the qualifier of a vector register or vector element of type
995 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
996 succeeds; otherwise return FALSE.
997
998 Accept only one occurrence of:
999 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
1000 b h s d q */
1001 static bool
1002 parse_vector_type_for_operand (aarch64_reg_type reg_type,
1003 struct vector_type_el *parsed_type, char **str)
1004 {
1005 char *ptr = *str;
1006 unsigned width;
1007 unsigned element_size;
1008 enum vector_el_type type;
1009
1010 /* skip '.' */
1011 gas_assert (*ptr == '.');
1012 ptr++;
1013
1014 if (reg_type != REG_TYPE_VN || !ISDIGIT (*ptr))
1015 {
1016 width = 0;
1017 goto elt_size;
1018 }
1019 width = strtoul (ptr, &ptr, 10);
1020 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
1021 {
1022 first_error_fmt (_("bad size %d in vector width specifier"), width);
1023 return false;
1024 }
1025
1026 elt_size:
1027 switch (TOLOWER (*ptr))
1028 {
1029 case 'b':
1030 type = NT_b;
1031 element_size = 8;
1032 break;
1033 case 'h':
1034 type = NT_h;
1035 element_size = 16;
1036 break;
1037 case 's':
1038 type = NT_s;
1039 element_size = 32;
1040 break;
1041 case 'd':
1042 type = NT_d;
1043 element_size = 64;
1044 break;
1045 case 'q':
1046 if (reg_type != REG_TYPE_VN || width == 1)
1047 {
1048 type = NT_q;
1049 element_size = 128;
1050 break;
1051 }
1052 /* fall through. */
1053 default:
1054 if (*ptr != '\0')
1055 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
1056 else
1057 first_error (_("missing element size"));
1058 return false;
1059 }
1060 if (width != 0 && width * element_size != 64
1061 && width * element_size != 128
1062 && !(width == 2 && element_size == 16)
1063 && !(width == 4 && element_size == 8))
1064 {
1065 first_error_fmt (_
1066 ("invalid element size %d and vector size combination %c"),
1067 width, *ptr);
1068 return false;
1069 }
1070 ptr++;
1071
1072 parsed_type->type = type;
1073 parsed_type->width = width;
1074 parsed_type->element_size = element_size;
1075
1076 *str = ptr;
1077
1078 return true;
1079 }
1080
1081 /* *STR contains an SVE zero/merge predication suffix. Parse it into
1082 *PARSED_TYPE and point *STR at the end of the suffix. */
1083
1084 static bool
1085 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
1086 {
1087 char *ptr = *str;
1088
1089 /* Skip '/'. */
1090 gas_assert (*ptr == '/');
1091 ptr++;
1092 switch (TOLOWER (*ptr))
1093 {
1094 case 'z':
1095 parsed_type->type = NT_zero;
1096 break;
1097 case 'm':
1098 parsed_type->type = NT_merge;
1099 break;
1100 default:
1101 if (*ptr != '\0' && *ptr != ',')
1102 first_error_fmt (_("unexpected character `%c' in predication type"),
1103 *ptr);
1104 else
1105 first_error (_("missing predication type"));
1106 return false;
1107 }
1108 parsed_type->width = 0;
1109 *str = ptr + 1;
1110 return true;
1111 }
1112
1113 /* Return true if CH is a valid suffix character for registers of
1114 type TYPE. */
1115
1116 static bool
1117 aarch64_valid_suffix_char_p (aarch64_reg_type type, char ch)
1118 {
1119 switch (type)
1120 {
1121 case REG_TYPE_VN:
1122 case REG_TYPE_ZN:
1123 case REG_TYPE_ZA:
1124 case REG_TYPE_ZAT:
1125 case REG_TYPE_ZATH:
1126 case REG_TYPE_ZATV:
1127 return ch == '.';
1128
1129 case REG_TYPE_PN:
1130 return ch == '.' || ch == '/';
1131
1132 default:
1133 return false;
1134 }
1135 }
1136
1137 /* Parse an index expression at *STR, storing it in *IMM on success. */
1138
1139 static bool
1140 parse_index_expression (char **str, int64_t *imm)
1141 {
1142 expressionS exp;
1143
1144 aarch64_get_expression (&exp, str, GE_NO_PREFIX, REJECT_ABSENT);
1145 if (exp.X_op != O_constant)
1146 {
1147 first_error (_("constant expression required"));
1148 return false;
1149 }
1150 *imm = exp.X_add_number;
1151 return true;
1152 }
1153
/* Parse a register of the type TYPE.

   Return null if the string pointed to by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register, and optionally return the register
   shape and element index information in *TYPEINFO.

   FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list.

   FLAGS includes PTR_FULL_REG if the function should ignore any potential
   register index.

   FLAGS includes PTR_GOOD_MATCH if we are sufficiently far into parsing
   an operand that we can be confident that it is a good match.  */

#define PTR_IN_REGLIST (1U << 0)
#define PTR_FULL_REG (1U << 1)
#define PTR_GOOD_MATCH (1U << 2)

static const reg_entry *
parse_typed_reg (char **ccp, aarch64_reg_type type,
		 struct vector_type_el *typeinfo, unsigned int flags)
{
  char *str = *ccp;
  /* Remember whether the input started with a letter; a non-letter
     inside a register list is reported as a hard syntax error below.  */
  bool isalpha = ISALPHA (*str);
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;
  unsigned int err_flags = (flags & PTR_IN_REGLIST) ? SEF_IN_REGLIST : 0;

  /* Start with an empty shape/index description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.element_size = 0;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      if (!isalpha && (flags & PTR_IN_REGLIST))
	set_fatal_syntax_error (_("syntax error in register list"));
      else if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }
  /* Narrow TYPE from the accepted class to the register's own type.  */
  type = reg->type;

  if (aarch64_valid_suffix_char_p (reg->type, *str))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return NULL;
	  /* ZA tile registers are numbered 0..(elements-1); with
	     8-bit elements there are 8 tiles, halving for each
	     doubling of the element size.  */
	  if ((reg->type == REG_TYPE_ZAT
	       || reg->type == REG_TYPE_ZATH
	       || reg->type == REG_TYPE_ZATV)
	      && reg->number * 8 >= parsetype.element_size)
	    {
	      set_syntax_error (_("ZA tile number out of range"));
	      return NULL;
	    }
	}
      else
	{
	  /* A '/' suffix: SVE zero/merge predication.  */
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return NULL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type != REG_TYPE_VN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar. Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
    {
      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return NULL;
	}

      if (flags & PTR_IN_REGLIST)
	{
	  first_error (_("index not allowed inside register list"));
	  return NULL;
	}

      atype.defined |= NTA_HASINDEX;

      if (!parse_index_expression (&str, &atype.index))
	return NULL;

      if (! skip_past_char (&str, ']'))
	return NULL;
    }
  else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return NULL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  *ccp = str;

  return reg;
}
1303
/* Parse register.

   Return the register on success; return null otherwise.

   If this is a NEON vector register with additional type information, fill
   in the struct pointed to by VECTYPE (if non-NULL).

   This is a convenience wrapper around parse_typed_reg with no flags
   set; this parser does not handle register lists.  */

static const reg_entry *
aarch64_reg_parse (char **ccp, aarch64_reg_type type,
		   struct vector_type_el *vectype)
{
  return parse_typed_reg (ccp, type, vectype, 0);
}
1319
1320 static inline bool
1321 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1322 {
1323 return (e1.type == e2.type
1324 && e1.defined == e2.defined
1325 && e1.width == e2.width
1326 && e1.element_size == e2.element_size
1327 && e1.index == e2.index);
1328 }
1329
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;
  unsigned int ptr_flags = PTR_IN_REGLIST;

  if (*str != '{')
    {
      set_expected_reglist_error (type, parse_reg (&str));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.element_size = 0;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  /* One iteration per register (or per '-'-separated range endpoint);
     the loop condition below sets IN_RANGE when it sees '-'.  */
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
					      ptr_flags);
      if (!reg)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      val = reg->number;
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* All entries must have the same shape as the first one.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Pack each register number into its 5-bit slot of RET_VAL,
	 expanding a range into its individual registers.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
      /* From here on, a failure is a fatal error, not a fall-back.  */
      ptr_flags |= PTR_GOOD_MATCH;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* The shared element index follows the closing brace.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  if (!parse_index_expression (&str, &typeinfo_first.index))
	    error = true;
	  if (! skip_past_char (&str, ']'))
	    error = true;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low two bits encode the count minus one; see the format comment
     above.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1486
1487 /* Directives: register aliases. */
1488
1489 static reg_entry *
1490 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1491 {
1492 reg_entry *new;
1493 const char *name;
1494
1495 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1496 {
1497 if (new->builtin)
1498 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1499 str);
1500
1501 /* Only warn about a redefinition if it's not defined as the
1502 same register. */
1503 else if (new->number != number || new->type != type)
1504 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1505
1506 return NULL;
1507 }
1508
1509 name = xstrdup (str);
1510 new = XNEW (reg_entry);
1511
1512 new->name = name;
1513 new->number = number;
1514 new->type = type;
1515 new->builtin = false;
1516
1517 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1518
1519 return new;
1520 }
1521
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   NEWNAME points at the start of the statement and P at the text
   following the potential alias name.

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* The statement was a .req, so report it as handled.  */
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the case variant if it differs from the name as
	 written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1601
/* Handler for a ".req" pseudo-op appearing at the start of a line.
   Should never be legitimately reached, as .req goes between the
   alias and the register name, not at the beginning of the line
   (see create_register_alias); so reaching here is a syntax error.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1609
1610 /* The .unreq directive deletes an alias which was previously defined
1611 by .req. For example:
1612
1613 my_alias .req r11
1614 .unreq my_alias */
1615
1616 static void
1617 s_unreq (int a ATTRIBUTE_UNUSED)
1618 {
1619 char *name;
1620 char saved_char;
1621
1622 name = input_line_pointer;
1623 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1624 saved_char = *input_line_pointer;
1625 *input_line_pointer = 0;
1626
1627 if (!*name)
1628 as_bad (_("invalid syntax for .unreq directive"));
1629 else
1630 {
1631 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1632
1633 if (!reg)
1634 as_bad (_("unknown register alias '%s'"), name);
1635 else if (reg->builtin)
1636 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1637 name);
1638 else
1639 {
1640 char *p;
1641 char *nbuf;
1642
1643 str_hash_delete (aarch64_reg_hsh, name);
1644 free ((char *) reg->name);
1645 free (reg);
1646
1647 /* Also locate the all upper case and all lower case versions.
1648 Do not complain if we cannot find one or the other as it
1649 was probably deleted above. */
1650
1651 nbuf = strdup (name);
1652 for (p = nbuf; *p; p++)
1653 *p = TOUPPER (*p);
1654 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1655 if (reg)
1656 {
1657 str_hash_delete (aarch64_reg_hsh, nbuf);
1658 free ((char *) reg->name);
1659 free (reg);
1660 }
1661
1662 for (p = nbuf; *p; p++)
1663 *p = TOLOWER (*p);
1664 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1665 if (reg)
1666 {
1667 str_hash_delete (aarch64_reg_hsh, nbuf);
1668 free ((char *) reg->name);
1669 free (reg);
1670 }
1671
1672 free (nbuf);
1673 }
1674 }
1675
1676 *input_line_pointer = saved_char;
1677 demand_empty_rest_of_line ();
1678 }
1679
1680 /* Directives: Instruction set selection. */
1681
1682 #if defined OBJ_ELF || defined OBJ_COFF
1683 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1684 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1685 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1686 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1687
/* Create a new mapping symbol for the transition to STATE, at offset
   VALUE within FRAG.  The symbol is "$d" for MAP_DATA and "$x" for
   MAP_INSN; any other state aborts.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* A previous symbol at offset 0 is superseded; unlink it.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Likewise replace a previous symbol at the same offset.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1743
1744 /* We must sometimes convert a region marked as code to data during
1745 code alignment, if an odd number of bytes have to be padded. The
1746 code mapping symbol is pushed to an aligned address. */
1747
1748 static void
1749 insert_data_mapping_symbol (enum mstate state,
1750 valueT value, fragS * frag, offsetT bytes)
1751 {
1752 /* If there was already a mapping symbol, remove it. */
1753 if (frag->tc_frag_data.last_map != NULL
1754 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1755 frag->fr_address + value)
1756 {
1757 symbolS *symp = frag->tc_frag_data.last_map;
1758
1759 if (value == 0)
1760 {
1761 know (frag->tc_frag_data.first_map == symp);
1762 frag->tc_frag_data.first_map = NULL;
1763 }
1764 frag->tc_frag_data.last_map = NULL;
1765 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1766 }
1767
1768 make_mapping_symbol (MAP_DATA, value, frag);
1769 make_mapping_symbol (state, value + bytes, frag);
1770 }
1771
1772 static void mapping_state_2 (enum mstate state, int max_chars);
1773
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  Emits a mapping symbol when
   the state actually changes, and records 4-byte section alignment
   whenever instructions are emitted.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* The bytes before this point were data; mark them as such at
	 the start of the section.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1815
1816 /* Same as mapping_state, but MAX_CHARS bytes have already been
1817 allocated. Put the mapping symbol that far back. */
1818
1819 static void
1820 mapping_state_2 (enum mstate state, int max_chars)
1821 {
1822 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1823
1824 if (!SEG_NORMAL (now_seg))
1825 return;
1826
1827 if (mapstate == state)
1828 /* The mapping symbol has already been emitted.
1829 There is nothing else to do. */
1830 return;
1831
1832 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1833 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1834 }
1835 #else
1836 #define mapping_state(x) /* nothing */
1837 #define mapping_state_2(x, y) /* nothing */
1838 #endif
1839
1840 /* Directives: sectioning and alignment. */
1841
/* Handler for the .bss directive: switch to the BSS section and mark
   the current mapping state as data.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1851
/* Handler for the .even directive: align the current location to a
   2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* Record 2^1 alignment regardless, so the section header is right.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1863
1864 /* Directives: Literal pools. */
1865
1866 static literal_pool *
1867 find_literal_pool (int size)
1868 {
1869 literal_pool *pool;
1870
1871 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1872 {
1873 if (pool->section == now_seg
1874 && pool->sub_section == now_subseg && pool->size == size)
1875 break;
1876 }
1877
1878 return pool;
1879 }
1880
1881 static literal_pool *
1882 find_or_make_literal_pool (int size)
1883 {
1884 /* Next literal pool ID number. */
1885 static unsigned int latest_pool_num = 1;
1886 literal_pool *pool;
1887
1888 pool = find_literal_pool (size);
1889
1890 if (pool == NULL)
1891 {
1892 /* Create a new pool. */
1893 pool = XNEW (literal_pool);
1894 if (!pool)
1895 return NULL;
1896
1897 /* Currently we always put the literal pool in the current text
1898 section. If we were generating "small" model code where we
1899 knew that all code and initialised data was within 1MB then
1900 we could output literals to mergeable, read-only data
1901 sections. */
1902
1903 pool->next_free_entry = 0;
1904 pool->section = now_seg;
1905 pool->sub_section = now_subseg;
1906 pool->size = size;
1907 pool->next = list_of_pools;
1908 pool->symbol = NULL;
1909
1910 /* Add it to the list. */
1911 list_of_pools = pool;
1912 }
1913
1914 /* New pools, and emptied pools, will have a NULL symbol. */
1915 if (pool->symbol == NULL)
1916 {
1917 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1918 &zero_address_frag, 0);
1919 pool->id = latest_pool_num++;
1920 }
1921
1922 /* Done. */
1923 return pool;
1924 }
1925
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   On success *EXP is rewritten to be a reference to the pool symbol
   plus the entry's byte offset.  Return TRUE on success, otherwise
   return FALSE.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  Constants
     compare by value; symbol references by symbol and addend.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP as pool_symbol + offset-of-entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1985
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.
   Gives SYMBOLP its NAME, SEGMENT, VALUE and FRAG, and appends it to
   the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its name.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
2036
2037
/* Handler for the .ltorg directive: dump every non-empty literal pool
   (4-byte and 8-byte, i.e. align 2..3; the loop also covers align 4)
   for the current (sub)section at the current location, then mark the
   pools empty.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* NOTE(review): the '\002' byte presumably keeps the label from
	 colliding with any user-writable symbol name -- confirm.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Give the pool's placeholder symbol its real location now.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
2096
2097 #if defined(OBJ_ELF) || defined(OBJ_COFF)
2098 /* Forward declarations for functions below, in the MD interface
2099 section. */
2100 static struct reloc_table_entry * find_reloc_table_entry (char **);
2101
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Handler for data directives emitting NBYTES per expression
   (e.g. .word/.xword).  Comma-separated expressions are emitted in
   order; a ":reloc:" suffix on a symbol is recognized but currently
   rejected as unimplemented.  */
static void
s_aarch64_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional #:reloc: or :reloc: suffix.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2157 #endif
2158
2159 #ifdef OBJ_ELF
2160 /* Forward declarations for functions below, in the MD interface
2161 section. */
2162 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
2163
2164 /* Mark symbol that it follows a variant PCS convention. */
2165
2166 static void
2167 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
2168 {
2169 char *name;
2170 char c;
2171 symbolS *sym;
2172 asymbol *bfdsym;
2173 elf_symbol_type *elfsym;
2174
2175 c = get_symbol_name (&name);
2176 if (!*name)
2177 as_bad (_("Missing symbol name in directive"));
2178 sym = symbol_find_or_make (name);
2179 restore_line_pointer (c);
2180 demand_empty_rest_of_line ();
2181 bfdsym = symbol_get_bfdsym (sym);
2182 elfsym = elf_symbol_from (bfdsym);
2183 gas_assert (elfsym);
2184 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2185 }
2186 #endif /* OBJ_ELF */
2187
/* Handle the .inst directive: output one or more 32-bit words from a
   comma-separated list of constant expressions, marking them as
   instructions (mapping state, alignment and DWARF line info) rather
   than data.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;	/* Count of words emitted, for dwarf2_emit_insn.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* A bare ".inst" with no operands emits nothing.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed. */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  /* Emit a $x mapping symbol: what follows is code.  */
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instruction words are little-endian regardless of data endianness;
	 byte-swap the value when assembling for a big-endian target.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream. */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2244
2245 static void
2246 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2247 {
2248 demand_empty_rest_of_line ();
2249 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2250 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2251 }
2252
2253 #ifdef OBJ_ELF
2254 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2255
2256 static void
2257 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2258 {
2259 expressionS exp;
2260
2261 expression (&exp);
2262 frag_grow (4);
2263 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2264 BFD_RELOC_AARCH64_TLSDESC_ADD);
2265
2266 demand_empty_rest_of_line ();
2267 }
2268
2269 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2270
2271 static void
2272 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2273 {
2274 expressionS exp;
2275
2276 /* Since we're just labelling the code, there's no need to define a
2277 mapping symbol. */
2278 expression (&exp);
2279 /* Make sure there is enough room in this frag for the following
2280 blr. This trick only works if the blr follows immediately after
2281 the .tlsdesc directive. */
2282 frag_grow (4);
2283 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2284 BFD_RELOC_AARCH64_TLSDESC_CALL);
2285
2286 demand_empty_rest_of_line ();
2287 }
2288
2289 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2290
2291 static void
2292 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2293 {
2294 expressionS exp;
2295
2296 expression (&exp);
2297 frag_grow (4);
2298 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2299 BFD_RELOC_AARCH64_TLSDESC_LDR);
2300
2301 demand_empty_rest_of_line ();
2302 }
2303 #endif /* OBJ_ELF */
2304
2305 #ifdef TE_PE
2306 static void
2307 s_secrel (int dummy ATTRIBUTE_UNUSED)
2308 {
2309 expressionS exp;
2310
2311 do
2312 {
2313 expression (&exp);
2314 if (exp.X_op == O_symbol)
2315 exp.X_op = O_secrel;
2316
2317 emit_expr (&exp, 4);
2318 }
2319 while (*input_line_pointer++ == ',');
2320
2321 input_line_pointer--;
2322 demand_empty_rest_of_line ();
2323 }
2324
2325 void
2326 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
2327 {
2328 expressionS exp;
2329
2330 exp.X_op = O_secrel;
2331 exp.X_add_symbol = symbol;
2332 exp.X_add_number = 0;
2333 emit_expr (&exp, size);
2334 }
2335
2336 static void
2337 s_secidx (int dummy ATTRIBUTE_UNUSED)
2338 {
2339 expressionS exp;
2340
2341 do
2342 {
2343 expression (&exp);
2344 if (exp.X_op == O_symbol)
2345 exp.X_op = O_secidx;
2346
2347 emit_expr (&exp, 2);
2348 }
2349 while (*input_line_pointer++ == ',');
2350
2351 input_line_pointer--;
2352 demand_empty_rest_of_line ();
2353 }
2354 #endif /* TE_PE */
2355
2356 static void s_aarch64_arch (int);
2357 static void s_aarch64_cpu (int);
2358 static void s_aarch64_arch_extension (int);
2359
2360 /* This table describes all the machine specific pseudo-ops the assembler
2361 has to support. The fields are:
2362 pseudo-op name without dot
2363 function to call to execute this pseudo-op
2364 Integer arg to pass to the function. */
2365
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line. */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* Literal-pool directives; ".pool" is an alias for ".ltorg".  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Target-selection directives.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  /* Raw instruction words and CFI pointer-auth annotation.  */
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  /* TLS descriptor relocation markers and variant-PCS symbol marking.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* Data directives; the argument is the emitted size in bytes.  */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  /* IEEE half-precision and bfloat16 constants; the argument selects the
     float_cons format.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2399 \f
2400
2401 /* Check whether STR points to a register name followed by a comma or the
2402 end of line; REG_TYPE indicates which register types are checked
2403 against. Return TRUE if STR is such a register name; otherwise return
2404 FALSE. The function does not intend to produce any diagnostics, but since
2405 the register parser aarch64_reg_parse, which is called by this function,
2406 does produce diagnostics, we call clear_error to clear any diagnostics
2407 that may be generated by aarch64_reg_parse.
2408 Also, the function returns FALSE directly if there is any user error
2409 present at the function entry. This prevents the existing diagnostics
2410 state from being spoiled.
2411 The function currently serves parse_constant_immediate and
2412 parse_big_immediate only. */
2413 static bool
2414 reg_name_p (char *str, aarch64_reg_type reg_type)
2415 {
2416 const reg_entry *reg;
2417
2418 /* Prevent the diagnostics state from being spoiled. */
2419 if (error_p ())
2420 return false;
2421
2422 reg = aarch64_reg_parse (&str, reg_type, NULL);
2423
2424 /* Clear the parsing error that may be set by the reg parser. */
2425 clear_error ();
2426
2427 if (!reg)
2428 return false;
2429
2430 skip_whitespace (str);
2431 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2432 return true;
2433
2434 return false;
2435 }
2436
2437 /* Parser functions used exclusively in instruction operands. */
2438
2439 /* Parse an immediate expression which may not be constant.
2440
2441 To prevent the expression parser from pushing a register name
2442 into the symbol table as an undefined symbol, firstly a check is
2443 done to find out whether STR is a register of type REG_TYPE followed
2444 by a comma or the end of line. Return FALSE if STR is such a string. */
2445
2446 static bool
2447 parse_immediate_expression (char **str, expressionS *exp,
2448 aarch64_reg_type reg_type)
2449 {
2450 if (reg_name_p (*str, reg_type))
2451 {
2452 set_recoverable_error (_("immediate operand required"));
2453 return false;
2454 }
2455
2456 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2457
2458 if (exp->X_op == O_absent)
2459 {
2460 set_fatal_syntax_error (_("missing immediate expression"));
2461 return false;
2462 }
2463
2464 return true;
2465 }
2466
2467 /* Constant immediate-value read function for use in insn parsing.
2468 STR points to the beginning of the immediate (with the optional
2469 leading #); *VAL receives the value. REG_TYPE says which register
2470 names should be treated as registers rather than as symbolic immediates.
2471
2472 Return TRUE on success; otherwise return FALSE. */
2473
2474 static bool
2475 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2476 {
2477 expressionS exp;
2478
2479 if (! parse_immediate_expression (str, &exp, reg_type))
2480 return false;
2481
2482 if (exp.X_op != O_constant)
2483 {
2484 set_syntax_error (_("constant expression required"));
2485 return false;
2486 }
2487
2488 *val = exp.X_add_number;
2489 return true;
2490 }
2491
/* Extract the AArch64 8-bit FP immediate encoding from the IEEE
   single-precision bit pattern IMM.  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0] */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31] -> b[7] */
  return low7 | sign;
}
2498
2499 /* Return TRUE if the single-precision floating-point value encoded in IMM
2500 can be expressed in the AArch64 8-bit signed floating-point format with
2501 3-bit exponent and normalized 4 bits of precision; in other words, the
2502 floating-point value must be expressable as
2503 (+/-) n / 16 * power (2, r)
2504 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2505
static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* If a single-precision floating-point value has the following bit
     pattern, it can be expressed in the AArch64 8-bit floating-point
     format:

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~ e.  */

  /* Expected contents of bits 30..25 depending on bit 30 ('E').  */
  uint32_t expected = ((imm >> 30) & 0x1) ? 0x40000000 : 0x3e000000;

  return (imm & 0x7ffff) == 0		     /* lower 19 bits are 0.  */
	 && (imm & 0x7e000000) == expected;  /* bits 25 - 29 == ~ bit 30.  */
}
2531
2532 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2533 as an IEEE float without any loss of precision. Store the value in
2534 *FPWORD if so. */
2535
static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
     if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t hi = imm >> 32;
  uint32_t lo = imm;

  /* The 29 low-order mantissa bits would be discarded; they must all be
     zero for the conversion to be exact.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* The three '~' bits must be the inverse of E.  */
  uint32_t expected = ((hi >> 30) & 0x1) ? 0x40000000 : 0x38000000;
  if ((hi & 0x78000000) != expected)
    return false;

  /* Reject Eeee_eeee == 1111_1111: that exponent maps onto the float's
     all-ones (infinity/NaN) range.  */
  if ((hi & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (hi & 0xc0000000)		 /* 1 n bit and 1 E bit. */
	    | ((hi << 3) & 0x3ffffff8)	 /* 7 e and 20 s bits. */
	    | (lo >> 29);		 /* 3 S bits. */
  return true;
}
2579
2580 /* Return true if we should treat OPERAND as a double-precision
2581 floating-point operand rather than a single-precision one. */
2582 static bool
2583 double_precision_operand_p (const aarch64_opnd_info *operand)
2584 {
2585 /* Check for unsuffixed SVE registers, which are allowed
2586 for LDR and STR but not in instructions that require an
2587 immediate. We get better error messages if we arbitrarily
2588 pick one size, parse the immediate normally, and then
2589 report the match failure in the normal way. */
2590 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2591 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2592 }
2593
2594 /* Parse a floating-point immediate. Return TRUE on success and return the
2595 value in *IMMED in the format of IEEE754 single-precision encoding.
2596 *CCP points to the start of the string; DP_P is TRUE when the immediate
2597 is expected to be in double-precision (N.B. this only matters when
2598 hexadecimal representation is involved). REG_TYPE says which register
2599 names should be treated as registers rather than as symbolic immediates.
2600
2601 This routine accepts any IEEE float; it is up to the callers to reject
2602 invalid ones. */
2603
static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;		/* Result: IEEE754 single-precision bits.  */
  bool hex_p = false;		/* Set when the hex-encoding path was taken.  */

  /* The leading '#' on the immediate is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision. */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Double-precision bits: only accept values that convert to a
	     float without loss of precision.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* Single-precision bits must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A bare register name is not an immediate; report a recoverable
	 error so other parses may be attempted.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal path: parse the literal as a single-precision float.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP). */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2669
/* Less-generic immediate-value read function with the possibility of loading
   a big (64-bit) immediate, as required by AdvSIMD Modified immediate
   instructions.

   To prevent the expression parser from pushing a register name into the
   symbol table as an undefined symbol, a check is firstly done to find
   out whether STR is a register of type REG_TYPE followed by a comma or
   the end of line.  Return FALSE if STR is such a register.

   NOTE: the parsed expression is stored in the global inst.reloc.exp as a
   side effect.  When the expression is not O_constant, *IMM is left
   untouched and the function still returns TRUE — the caller is expected
   to resolve the value later via the relocation machinery.  */

static bool
parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
{
  char *ptr = *str;

  if (reg_name_p (ptr, reg_type))
    {
      set_syntax_error (_("immediate operand required"));
      return false;
    }

  aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);

  /* Only a constant expression yields *IMM here; symbolic expressions
     stay in inst.reloc.exp for later fixup.  */
  if (inst.reloc.exp.X_op == O_constant)
    *imm = inst.reloc.exp.X_add_number;

  *str = ptr;

  return true;
}
2699
2700 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2701 if NEED_LIBOPCODES is non-zero, the fixup will need
2702 assistance from the libopcodes. */
2703
2704 static inline void
2705 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2706 const aarch64_opnd_info *operand,
2707 int need_libopcodes_p)
2708 {
2709 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2710 reloc->opnd = operand->type;
2711 if (need_libopcodes_p)
2712 reloc->need_libopcodes_p = 1;
2713 };
2714
/* Return TRUE if the instruction needs to be fixed up later internally by
   the GAS; otherwise return FALSE.  Inspects the global inst.reloc set up
   by operand parsing.  */

static inline bool
aarch64_gas_internal_fixup_p (void)
{
  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
2723
2724 /* Assign the immediate value to the relevant field in *OPERAND if
2725 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2726 needs an internal fixup in a later stage.
2727 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2728 IMM.VALUE that may get assigned with the constant. */
2729 static inline void
2730 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2731 aarch64_opnd_info *operand,
2732 int addr_off_p,
2733 int need_libopcodes_p,
2734 int skip_p)
2735 {
2736 if (reloc->exp.X_op == O_constant)
2737 {
2738 if (addr_off_p)
2739 operand->addr.offset.imm = reloc->exp.X_add_number;
2740 else
2741 operand->imm.value = reloc->exp.X_add_number;
2742 reloc->type = BFD_RELOC_UNUSED;
2743 }
2744 else
2745 {
2746 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2747 /* Tell libopcodes to ignore this operand or not. This is helpful
2748 when one of the operands needs to be fixed up later but we need
2749 libopcodes to check the other operands. */
2750 operand->skip = skip_p;
2751 }
2752 }
2753
2754 /* Relocation modifiers. Each entry in the table contains the textual
2755 name for the relocation which may be placed before a symbol used as
2756 a load/store offset, or add immediate. It must be surrounded by a
2757 leading and trailing colon, for example:
2758
2759 ldr x0, [x1, #:rello:varsym]
2760 add x0, x1, #:rello:varsym */
2761
struct reloc_table_entry
{
  /* Modifier name as written in the source, without the colons.  */
  const char *name;
  /* Non-zero if the relocation is PC-relative.  */
  int pc_rel;
  /* Relocation to use for each instruction class that may carry this
     modifier; 0 means the modifier is not valid in that context.  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};
2773
static struct reloc_table_entry reloc_table[] =
{
  /* Fields per entry: name, pc_rel, then one relocation each for the
     ADR, ADRP, MOVW, ADD, LDST and LD-literal instruction classes.  */

  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation. */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3302
3303 /* Given the address of a pointer pointing to the textual name of a
3304 relocation as may appear in assembler source, attempt to find its
3305 details in reloc_table. The pointer will be updated to the character
3306 after the trailing colon. On failure, NULL will be returned;
3307 otherwise return the reloc_table_entry. */
3308
3309 static struct reloc_table_entry *
3310 find_reloc_table_entry (char **str)
3311 {
3312 unsigned int i;
3313 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3314 {
3315 int length = strlen (reloc_table[i].name);
3316
3317 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3318 && (*str)[length] == ':')
3319 {
3320 *str += (length + 1);
3321 return &reloc_table[i];
3322 }
3323 }
3324
3325 return NULL;
3326 }
3327
3328 /* Returns 0 if the relocation should never be forced,
3329 1 if the relocation must be forced, and -1 if either
3330 result is OK. */
3331
static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
         ilp32_p.  */
      return 1;

    /* GOT-, TLS- and low-12-bit page-offset relocations: these all
       require linker processing (GOT/TLS sequence relaxation or final
       page-offset resolution), so force them to be emitted.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* No opinion here; let the caller apply the generic rule.  */
      return -1;
    }
}
3430
3431 int
3432 aarch64_force_relocation (struct fix *fixp)
3433 {
3434 int res = aarch64_force_reloc (fixp->fx_r_type);
3435
3436 if (res == -1)
3437 return generic_force_reloc (fixp);
3438 return res;
3439 }
3440
3441 /* Mode argument to parse_shift and parser_shifter_operand. */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl" (SVE vector-length multiplier)  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3455
3456 /* Parse a <shift> operator on an AArch64 data processing instruction.
3457 Return TRUE on success; otherwise return FALSE. */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  /* NOTE(review): 's' only seeds 'p', which the loop below re-initializes
     from *str anyway, so 's' is effectively unused.  */
  char *p = s;

  /* Scan the alphabetic operator name (e.g. "lsl", "uxtw", "mul").  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only valid in the SIMD immediate context (SHIFTED_LSL_MSL).  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  /* MUL is only valid in the SVE contexts.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Check the parsed operator against what MODE permits.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    /* No amount follows: a closing ']' in register-offset mode, or
       "MUL VL", which has no numeric amount.  */
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* A missing amount is only OK for an extend operator with no '#'
	 prefix; e.g. "uxtw" alone defaults to amount 0.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  /* Record the operator and advance the caller's cursor.  */
  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3628
3629 /* Parse a <shifter_operand> for a data processing instruction:
3630
3631 #<immediate>
3632 #<immediate>, LSL #imm
3633
3634 Validation of immediate operands is deferred to md_apply_fix.
3635
3636 Return TRUE on success; otherwise return FALSE. */
3637
static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the arithmetic and logical immediate forms take this path.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Not accept any shifter for logical immediate values.  This only
     diagnoses an error when a shift actually parses after the comma;
     a comma followed by something unparseable falls through to the
     success path below with P already advanced past the comma.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3670
3671 /* Parse a <shifter_operand> for a data processing instruction:
3672
3673 <Rm>
3674 <Rm>, <shift>
3675 #<immediate>
3676 #<immediate>, LSL #imm
3677
3678 where <shift> is handled by parse_shift above, and the last two
3679 cases are handled by the function above.
3680
3681 Validation of immediate operands is deferred to md_apply_fix.
3682
3683 Return TRUE on success; otherwise return FALSE. */
3684
3685 static bool
3686 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3687 enum parse_shift_mode mode)
3688 {
3689 const reg_entry *reg;
3690 aarch64_opnd_qualifier_t qualifier;
3691 enum aarch64_operand_class opd_class
3692 = aarch64_get_operand_class (operand->type);
3693
3694 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3695 if (reg)
3696 {
3697 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3698 {
3699 set_syntax_error (_("unexpected register in the immediate operand"));
3700 return false;
3701 }
3702
3703 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3704 {
3705 set_expected_reg_error (REG_TYPE_R_Z, reg, 0);
3706 return false;
3707 }
3708
3709 operand->reg.regno = reg->number;
3710 operand->qualifier = qualifier;
3711
3712 /* Accept optional shift operation on register. */
3713 if (! skip_past_comma (str))
3714 return true;
3715
3716 if (! parse_shift (str, operand, mode))
3717 return false;
3718
3719 return true;
3720 }
3721 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3722 {
3723 set_syntax_error
3724 (_("integer register expected in the extended/shifted operand "
3725 "register"));
3726 return false;
3727 }
3728
3729 /* We have a shifted immediate variable. */
3730 return parse_shifter_operand_imm (str, operand, mode);
3731 }
3732
3733 /* Return TRUE on success; return FALSE otherwise. */
3734
static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip the '#:' or ':' introducer.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* The modifier must have an ADD-class relocation for this
	 (ADD/SUB-style) instruction context.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3794
3795 /* Parse all forms of an address expression. Information is written
3796 to *OPERAND and/or inst.reloc.
3797
3798 The A64 instruction set has the following addressing modes:
3799
3800 Offset
3801 [base] // in SIMD ld/st structure
3802 [base{,#0}] // in ld/st exclusive
3803 [base{,#imm}]
3804 [base,Xm{,LSL #imm}]
3805 [base,Xm,SXTX {#imm}]
3806 [base,Wm,(S|U)XTW {#imm}]
3807 Pre-indexed
3808 [base]! // in ldraa/ldrab exclusive
3809 [base,#imm]!
3810 Post-indexed
3811 [base],#imm
3812 [base],Xm // in SIMD ld/st structure
3813 PC-relative (literal)
3814 label
3815 SVE:
3816 [base,#imm,MUL VL]
3817 [base,Zm.D{,LSL #imm}]
3818 [base,Zm.S,(S|U)XTW {#imm}]
3819 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3820 [Zn.S,#imm]
3821 [Zn.D,#imm]
3822 [Zn.S{, Xm}]
3823 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3824 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3825 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3826
3827 (As a convenience, the notation "=immediate" is permitted in conjunction
3828 with the pc-relative literal load instructions to automatically place an
3829 immediate value or symbolic address in a nearby literal pool and generate
3830 a hidden label which references it.)
3831
3832 Upon a successful parsing, the address structure in *OPERAND will be
3833 filled in the following way:
3834
3835 .base_regno = <base>
3836 .offset.is_reg // 1 if the offset is a register
3837 .offset.imm = <imm>
3838 .offset.regno = <Rm>
3839
3840 For different addressing modes defined in the A64 ISA:
3841
3842 Offset
3843 .pcrel=0; .preind=1; .postind=0; .writeback=0
3844 Pre-indexed
3845 .pcrel=0; .preind=1; .postind=0; .writeback=1
3846 Post-indexed
3847 .pcrel=0; .preind=0; .postind=1; .writeback=1
3848 PC-relative (literal)
3849 .pcrel=1; .preind=1; .postind=0; .writeback=0
3850
3851 The shift/extension information, if any, will be stored in .shifter.
3852 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3853 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3854 corresponding register.
3855
3856 BASE_TYPE says which types of base register should be accepted and
3857 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3858 is the type of shifter that is allowed for immediate offsets,
3859 or SHIFTED_NONE if none.
3860
3861 In all other respects, it is the caller's responsibility to check
3862 for addressing modes not supported by the instruction, and to set
3863 inst.reloc.type. */
3864
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is the PC-relative form.  */
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* Pick the relocation variant appropriate for the operand:
	     ADR uses the adr_type; everything else here is a literal
	     load.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Remember whether the base looked like a register name at all, to
     choose between the "invalid" and "expected" diagnostics below.  */
  bool alpha_base_p = ISALPHA (*p);
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      if (reg
	  && aarch64_check_reg_type (reg, REG_TYPE_R_SP)
	  && *base_qualifier == AARCH64_OPND_QLF_W)
	set_syntax_error (_("expected a 64-bit base register"));
      else if (alpha_base_p)
	set_syntax_error (_("invalid base register"));
      else
	set_syntax_error (_("expected a base register"));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]   */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifts require a 64-bit (X) offset register,
		 except for the SVE2 vector-plus-scalar special case.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW requires a 32-bit (W) offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      /* Plain immediate offset, optionally followed by a
		 shifter when IMM_SHIFT_MODE allows one (e.g. MUL VL).  */
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!  */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!  */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* Materialize the implicit #0 offset.  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
4171
4172 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4173 on success. */
4174 static bool
4175 parse_address (char **str, aarch64_opnd_info *operand)
4176 {
4177 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4178 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4179 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
4180 }
4181
4182 /* Parse an address in which SVE vector registers and MUL VL are allowed.
4183 The arguments have the same meaning as for parse_address_main.
4184 Return TRUE on success. */
4185 static bool
4186 parse_sve_address (char **str, aarch64_opnd_info *operand,
4187 aarch64_opnd_qualifier_t *base_qualifier,
4188 aarch64_opnd_qualifier_t *offset_qualifier)
4189 {
4190 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
4191 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
4192 SHIFTED_MUL_VL);
4193 }
4194
4195 /* Parse a register X0-X30. The register must be 64-bit and register 31
4196 is unallocated. */
4197 static bool
4198 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4199 {
4200 const reg_entry *reg = parse_reg (str);
4201 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4202 {
4203 set_expected_reg_error (REG_TYPE_R_64, reg, 0);
4204 return false;
4205 }
4206 operand->reg.regno = reg->number;
4207 operand->qualifier = AARCH64_OPND_QLF_X;
4208 return true;
4209 }
4210
4211 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
4212 Return TRUE on success; otherwise return FALSE. */
static bool
parse_half (char **str, int *internal_fixup_p)
{
  char *p = *str;

  /* An optional '#' may precede either a reloc modifier or a plain
     immediate.  */
  skip_past_char (&p, '#');

  gas_assert (internal_fixup_p);
  *internal_fixup_p = 0;

  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;

      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* The modifier must provide a MOVW-class relocation for this
	 MOVZ/MOVN/MOVK instruction.  */
      if (entry->movw_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      inst.reloc.type = entry->movw_type;
    }
  else
    /* No reloc modifier: the value is fixed up internally by gas.  */
    *internal_fixup_p = 1;

  /* Parse the value/symbol expression itself.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
    return false;

  *str = p;
  return true;
}
4254
4255 /* Parse an operand for an ADRP instruction:
4256 ADRP <Xd>, <label>
4257 Return TRUE on success; otherwise return FALSE. */
4258
static bool
parse_adrp (char **str)
{
  char *p;

  p = *str;
  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* The modifier must provide an ADRP-class relocation.  */
      if (entry->adrp_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      inst.reloc.type = entry->adrp_type;
    }
  else
    /* Plain "adrp Xd, label": default page-relative relocation.  */
    inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;

  /* ADRP is always PC-relative.  */
  inst.reloc.pc_rel = 1;
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
    return false;
  *str = p;
  return true;
}
4295
4296 /* Miscellaneous. */
4297
4298 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4299 of SIZE tokens in which index I gives the token for field value I,
4300 or is null if field value I is invalid. REG_TYPE says which register
4301 names should be treated as registers rather than as symbolic immediates.
4302
4303 Return true on success, moving *STR past the operand and storing the
4304 field value in *VAL. */
4305
4306 static int
4307 parse_enum_string (char **str, int64_t *val, const char *const *array,
4308 size_t size, aarch64_reg_type reg_type)
4309 {
4310 expressionS exp;
4311 char *p, *q;
4312 size_t i;
4313
4314 /* Match C-like tokens. */
4315 p = q = *str;
4316 while (ISALNUM (*q))
4317 q++;
4318
4319 for (i = 0; i < size; ++i)
4320 if (array[i]
4321 && strncasecmp (array[i], p, q - p) == 0
4322 && array[i][q - p] == 0)
4323 {
4324 *val = i;
4325 *str = q;
4326 return true;
4327 }
4328
4329 if (!parse_immediate_expression (&p, &exp, reg_type))
4330 return false;
4331
4332 if (exp.X_op == O_constant
4333 && (uint64_t) exp.X_add_number < size)
4334 {
4335 *val = exp.X_add_number;
4336 *str = p;
4337 return true;
4338 }
4339
4340 /* Use the default error for this operand. */
4341 return false;
4342 }
4343
4344 /* Parse an option for a preload instruction. Returns the encoding for the
4345 option, or PARSE_FAIL. */
4346
4347 static int
4348 parse_pldop (char **str)
4349 {
4350 char *p, *q;
4351 const struct aarch64_name_value_pair *o;
4352
4353 p = q = *str;
4354 while (ISALNUM (*q))
4355 q++;
4356
4357 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4358 if (!o)
4359 return PARSE_FAIL;
4360
4361 *str = q;
4362 return o->value;
4363 }
4364
4365 /* Parse an option for a barrier instruction. Returns the encoding for the
4366 option, or PARSE_FAIL. */
4367
4368 static int
4369 parse_barrier (char **str)
4370 {
4371 char *p, *q;
4372 const struct aarch64_name_value_pair *o;
4373
4374 p = q = *str;
4375 while (ISALPHA (*q))
4376 q++;
4377
4378 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4379 if (!o)
4380 return PARSE_FAIL;
4381
4382 *str = q;
4383 return o->value;
4384 }
4385
4386 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4387 return 0 if successful. Otherwise return PARSE_FAIL. */
4388
static int
parse_barrier_psb (char **str,
		   const struct aarch64_name_value_pair ** hint_opt)
{
  char *p, *q;
  const struct aarch64_name_value_pair *o;

  /* Scan the alphabetic option name.  */
  p = q = *str;
  while (ISALPHA (*q))
    q++;

  o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
  if (!o)
    {
      set_fatal_syntax_error
	( _("unknown or missing option to PSB/TSB"));
      return PARSE_FAIL;
    }

  /* 0x11 is presumably the hint-option value for CSYNC in the hint
     table -- verify against aarch64_hint_options.  */
  if (o->value != 0x11)
    {
      /* PSB only accepts option name 'CSYNC'.  */
      set_syntax_error
	(_("the specified option is not accepted for PSB/TSB"));
      return PARSE_FAIL;
    }

  *str = q;
  *hint_opt = o;
  return 0;
}
4420
4421 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4422 return 0 if successful. Otherwise return PARSE_FAIL. */
4423
4424 static int
4425 parse_bti_operand (char **str,
4426 const struct aarch64_name_value_pair ** hint_opt)
4427 {
4428 char *p, *q;
4429 const struct aarch64_name_value_pair *o;
4430
4431 p = q = *str;
4432 while (ISALPHA (*q))
4433 q++;
4434
4435 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4436 if (!o)
4437 {
4438 set_fatal_syntax_error
4439 ( _("unknown option to BTI"));
4440 return PARSE_FAIL;
4441 }
4442
4443 switch (o->value)
4444 {
4445 /* Valid BTI operands. */
4446 case HINT_OPD_C:
4447 case HINT_OPD_J:
4448 case HINT_OPD_JC:
4449 break;
4450
4451 default:
4452 set_syntax_error
4453 (_("unknown option to BTI"));
4454 return PARSE_FAIL;
4455 }
4456
4457 *str = q;
4458 *hint_opt = o;
4459 return 0;
4460 }
4461
4462 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4463 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4464 on failure. Format:
4465
4466 REG_TYPE.QUALIFIER
4467
4468 Side effect: Update STR with current parse position of success.
4469
4470 FLAGS is as for parse_typed_reg. */
4471
4472 static const reg_entry *
4473 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4474 aarch64_opnd_qualifier_t *qualifier, unsigned int flags)
4475 {
4476 struct vector_type_el vectype;
4477 const reg_entry *reg = parse_typed_reg (str, reg_type, &vectype,
4478 PTR_FULL_REG | flags);
4479 if (!reg)
4480 return NULL;
4481
4482 if (vectype.type == NT_invtype)
4483 *qualifier = AARCH64_OPND_QLF_NIL;
4484 else
4485 {
4486 *qualifier = vectype_to_qualifier (&vectype);
4487 if (*qualifier == AARCH64_OPND_QLF_NIL)
4488 return NULL;
4489 }
4490
4491 return reg;
4492 }
4493
4494 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4495
4496 #<imm>
4497 <imm>
4498
4499 Function return TRUE if immediate was found, or FALSE.
4500 */
4501 static bool
4502 parse_sme_immediate (char **str, int64_t *imm)
4503 {
4504 int64_t val;
4505 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4506 return false;
4507
4508 *imm = val;
4509 return true;
4510 }
4511
/* Parse index with selection register and immediate offset:

   [<Wv>, <imm>]
   [<Wv>, #<imm>]

   Return true on success, populating OPND with the parsed index.
   On failure, record a syntax error and return false.  */

static bool
parse_sme_za_index (char **str, struct aarch64_indexed_za *opnd)
{
  const reg_entry *reg;

  if (!skip_past_char (str, '['))
    {
      set_syntax_error (_("expected '['"));
      return false;
    }

  /* The selection register, encoded in the 2-bit Rv field.  */
  reg = parse_reg (str);
  if (reg == NULL || reg->type != REG_TYPE_R_32)
    {
      set_syntax_error (_("expected a 32-bit selection register"));
      return false;
    }
  opnd->index.regno = reg->number;

  if (!skip_past_char (str, ','))
    {
      set_syntax_error (_("missing immediate offset"));
      return false;
    }

  /* The immediate offset; its valid range is opcode-specific and is
     checked later in libopcodes, not here.  */
  if (!parse_sme_immediate (str, &opnd->index.imm))
    {
      set_syntax_error (_("expected a constant immediate offset"));
      return false;
    }

  if (!skip_past_char (str, ']'))
    {
      set_syntax_error (_("expected ']'"));
      return false;
    }

  return true;
}
4559
/* Parse a register of type REG_TYPE that might have an element type
   qualifier and that is indexed by two values: a 32-bit register,
   followed by an immediate.  The ranges of the register and the
   immediate vary by opcode and are checked in libopcodes.

   Return true on success, populating OPND with information about
   the operand and setting QUALIFIER to the register qualifier.

   Field format examples:

   <Pm>.<T>[<Wv>, #<imm>]
   ZA[<Wv>, #<imm>]
   <ZAn><HV>.<T>[<Wv>, #<imm>]

   FLAGS is as for parse_typed_reg.  */

static bool
parse_dual_indexed_reg (char **str, aarch64_reg_type reg_type,
			struct aarch64_indexed_za *opnd,
			aarch64_opnd_qualifier_t *qualifier,
			unsigned int flags)
{
  const reg_entry *reg = parse_reg_with_qual (str, reg_type, qualifier, flags);
  if (!reg)
    return false;

  /* Record whether the parsed register is of type REG_TYPE_ZATV and its
     tile/register number; the [<Wv>, #<imm>] index is parsed below.  */
  opnd->v = aarch64_check_reg_type (reg, REG_TYPE_ZATV);
  opnd->regno = reg->number;

  return parse_sme_za_index (str, opnd);
}
4591
4592 /* Like parse_sme_za_hv_tiles_operand, but expect braces around the
4593 operand. */
4594
4595 static bool
4596 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4597 struct aarch64_indexed_za *opnd,
4598 aarch64_opnd_qualifier_t *qualifier)
4599 {
4600 if (!skip_past_char (str, '{'))
4601 {
4602 set_expected_reglist_error (REG_TYPE_ZATHV, parse_reg (str));
4603 return false;
4604 }
4605
4606 if (!parse_dual_indexed_reg (str, REG_TYPE_ZATHV, opnd, qualifier,
4607 PTR_IN_REGLIST))
4608 return false;
4609
4610 if (!skip_past_char (str, '}'))
4611 {
4612 set_syntax_error (_("expected '}'"));
4613 return false;
4614 }
4615
4616 return true;
4617 }
4618
4619 /* Parse list of up to eight 64-bit element tile names separated by commas in
4620 SME's ZERO instruction:
4621
4622 ZERO { <mask> }
4623
4624 Function returns <mask>:
4625
4626 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4627 */
4628 static int
4629 parse_sme_zero_mask(char **str)
4630 {
4631 char *q;
4632 int mask;
4633 aarch64_opnd_qualifier_t qualifier;
4634 unsigned int ptr_flags = PTR_IN_REGLIST;
4635
4636 mask = 0x00;
4637 q = *str;
4638 do
4639 {
4640 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA_ZAT,
4641 &qualifier, ptr_flags);
4642 if (!reg)
4643 return PARSE_FAIL;
4644
4645 if (reg->type == REG_TYPE_ZA)
4646 {
4647 if (qualifier != AARCH64_OPND_QLF_NIL)
4648 {
4649 set_syntax_error ("ZA should not have a size suffix");
4650 return PARSE_FAIL;
4651 }
4652 /* { ZA } is assembled as all-ones immediate. */
4653 mask = 0xff;
4654 }
4655 else
4656 {
4657 int regno = reg->number;
4658 if (qualifier == AARCH64_OPND_QLF_S_B)
4659 {
4660 /* { ZA0.B } is assembled as all-ones immediate. */
4661 mask = 0xff;
4662 }
4663 else if (qualifier == AARCH64_OPND_QLF_S_H)
4664 mask |= 0x55 << regno;
4665 else if (qualifier == AARCH64_OPND_QLF_S_S)
4666 mask |= 0x11 << regno;
4667 else if (qualifier == AARCH64_OPND_QLF_S_D)
4668 mask |= 0x01 << regno;
4669 else if (qualifier == AARCH64_OPND_QLF_S_Q)
4670 {
4671 set_syntax_error (_("ZA tile masks do not operate at .Q"
4672 " granularity"));
4673 return PARSE_FAIL;
4674 }
4675 else if (qualifier == AARCH64_OPND_QLF_NIL)
4676 {
4677 set_syntax_error (_("missing ZA tile size"));
4678 return PARSE_FAIL;
4679 }
4680 else
4681 {
4682 set_syntax_error (_("invalid ZA tile"));
4683 return PARSE_FAIL;
4684 }
4685 }
4686 ptr_flags |= PTR_GOOD_MATCH;
4687 }
4688 while (skip_past_char (&q, ','));
4689
4690 *str = q;
4691 return mask;
4692 }
4693
4694 /* Wraps in curly braces <mask> operand ZERO instruction:
4695
4696 ZERO { <mask> }
4697
4698 Function returns value of <mask> bit-field.
4699 */
4700 static int
4701 parse_sme_list_of_64bit_tiles (char **str)
4702 {
4703 int regno;
4704
4705 if (!skip_past_char (str, '{'))
4706 {
4707 set_syntax_error (_("expected '{'"));
4708 return PARSE_FAIL;
4709 }
4710
4711 /* Empty <mask> list is an all-zeros immediate. */
4712 if (!skip_past_char (str, '}'))
4713 {
4714 regno = parse_sme_zero_mask (str);
4715 if (regno == PARSE_FAIL)
4716 return PARSE_FAIL;
4717
4718 if (!skip_past_char (str, '}'))
4719 {
4720 set_syntax_error (_("expected '}'"));
4721 return PARSE_FAIL;
4722 }
4723 }
4724 else
4725 regno = 0x00;
4726
4727 return regno;
4728 }
4729
4730 /* Parse streaming mode operand for SMSTART and SMSTOP.
4731
4732 {SM | ZA}
4733
4734 Function returns 's' if SM or 'z' if ZM is parsed. Otherwise PARSE_FAIL.
4735 */
4736 static int
4737 parse_sme_sm_za (char **str)
4738 {
4739 char *p, *q;
4740
4741 p = q = *str;
4742 while (ISALPHA (*q))
4743 q++;
4744
4745 if ((q - p != 2)
4746 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4747 {
4748 set_syntax_error (_("expected SM or ZA operand"));
4749 return PARSE_FAIL;
4750 }
4751
4752 *str = q;
4753 return TOLOWER (p[0]);
4754 }
4755
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-NULL, *FLAGS receives the register's flag bits (zero for
   an implementation-defined register).  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, truncating at the
     buffer size; Q is left one past the end of the name in *STR.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the 16-bit encoding:
	     op0[15:14] op1[13:11] Cn[10:7] Cm[6:3] op2[2:0].  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known register: diagnose (but still accept) names not supported
	 by the selected processor, and warn about deprecated names.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4829
/* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
   for the option, or NULL.  On success *STR is advanced past the name.  */

static const aarch64_sys_ins_reg *
parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_ins_reg *o;

  /* Copy a lower-cased version of the name into BUF, truncating at the
     buffer size; Q is left one past the end of the name in *STR.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return NULL;

  o = str_hash_find (sys_ins_regs, buf);
  if (!o)
    return NULL;

  /* Diagnose (but still accept) names not supported by the selected
     processor, and warn about deprecated names.  */
  if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
					o->name, o->value, o->flags, 0))
    as_bad (_("selected processor does not support system register "
	      "name '%s'"), buf);
  if (aarch64_sys_reg_deprecated_p (o->flags))
    as_warn (_("system register name '%s' is deprecated and may be "
	       "removed in a future release"), buf);

  *str = q;
  return o;
}
4867 \f
/* Operand-parsing helper macros.  Each assumes the enclosing function
   has a local `char *str' cursor and a `failure' label, and branches to
   `failure' when the expected construct is not found.  Several also
   assume locals used by md_assemble's operand loop (`reg', `info',
   `val', `imm_reg_type').  */

/* Skip past the single character CHR, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of type REGTYPE into `reg', or fail.  */
#define po_reg_or_fail(regtype) do {				\
    reg = aarch64_reg_parse (&str, regtype, NULL);		\
    if (!reg)							\
      goto failure;						\
} while (0)

/* Parse an integer or FP register of type REG_TYPE, recording its
   number and inherent qualifier into `info', or fail with a
   register-expected error.  */
#define po_int_fp_reg_or_fail(reg_type) do {			\
    reg = parse_reg (&str);					\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_expected_reg_error (reg_type, reg, 0);		\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = inherent_reg_qualifier (reg);		\
  } while (0)

/* Parse a constant immediate into `val' with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
} while (0)

/* Parse a constant immediate into `val' and fail unless it lies in
   [MIN, MAX].  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
} while (0)

/* Parse an enumeration-string operand from ARRAY into `val', or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
} while (0)

/* Evaluate EXPR and fail if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
} while (0)
4916 \f
/* A primitive log calculator: returns floor(log2(N)) for N > 1,
   and 0 for N <= 1.  */

static inline unsigned int
get_log2 (unsigned int n)
{
  unsigned int result;

  for (result = 0; n > 1; n >>= 1)
    result++;

  return result;
}
4930
/* Encode the 12-bit imm field of Add/sub immediate: the value is placed
   at bit position 10 of the instruction word.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  return imm << 10;
}
4937
/* Encode the shift amount field of Add/sub immediate: the value is
   placed at bit position 22 of the instruction word.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  return cnt << 22;
}
4944
4945
/* Encode the imm field of Adr: the low two bits of IMM (immlo) go to
   bits [30:29] and bits [20:2] (immhi) go to bits [23:5].  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;
  uint32_t immhi = (imm >> 2) & 0x7ffff;

  return (immlo << 29) | (immhi << 5);
}
4953
/* Encode the immediate field of Move wide immediate: the 16-bit value
   is placed at bit position 5 of the instruction word.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  return imm << 5;
}
4960
/* Encode the 26-bit offset of unconditional branch: keep the low 26
   bits of OFS, which occupy bits [25:0] of the instruction word.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x03ffffff;
}
4967
/* Encode the 19-bit offset of conditional branch and compare & branch:
   the low 19 bits of OFS are placed at bit position 5.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4974
/* Encode the 19-bit offset of ld literal: the low 19 bits of OFS are
   placed at bit position 5.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4981
/* Encode the 14-bit offset of test & branch: the low 14 bits of OFS
   are placed at bit position 5.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fff) << 5;
}
4988
/* Encode the 16-bit imm field of svc/hvc/smc: the value is placed at
   bit position 5 of the instruction word.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  return imm << 5;
}
4995
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling bit 30
   of the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ 0x40000000u;
}
5002
/* Reencode a MOVN/MOVZ-family opcode as MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | 0x40000000u;
}
5008
/* Reencode a MOVN/MOVZ-family opcode as MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & 0xbfffffffu;
}
5014
/* Overall per-instruction processing.  */

/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.  */

static fixS *
fix_new_aarch64 (fragS * frag,
		 int where,
		 short int size,
		 expressionS * exp,
		 int pc_rel,
		 int reloc)
{
  fixS *new_fix;

  switch (exp->X_op)
    {
    /* These expression forms can be handled directly by the generic
       fixup machinery.  */
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
      break;

    default:
      /* Any other expression form: wrap it in an expr_section symbol
	 and create the fix against that symbol instead.  */
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
			 pc_rel, reloc);
      break;
    }
  return new_fix;
}
5050 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error messages explaining which operand
   variants were considered.  Disabled by the -mno-verbose-error
   command-line option.  */
static int verbose_error_p = 1;
5056
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Indexed by
   enum aarch64_operand_error_kind values; keep the entries in the same
   order as that enum.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_OTHER_ERROR",
  "AARCH64_OPDE_INVALID_REGNO",
};
#endif /* DEBUG_AARCH64 */
5077
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enum values being ordered by
     severity; these assertions document (and, in debug builds, check)
     that assumed ordering.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_REG_LIST);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  gas_assert (AARCH64_OPDE_INVALID_REGNO > AARCH64_OPDE_OTHER_ERROR);
  return lhs > rhs;
}
5102
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.

   N.B. returns a pointer to a static buffer, overwritten on each call.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5131
/* Reset *INSTRUCTION to a clean state ready for assembling a new line:
   zero every field and mark the relocation type as unused.  */
static void
reset_aarch64_instruction (aarch64_instruction *instruction)
{
  memset (instruction, '\0', sizeof (aarch64_instruction));
  instruction->reloc.type = BFD_RELOC_UNUSED;
}
5138
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  /* The opcode template this error was recorded against.  */
  const aarch64_opcode *opcode;
  /* The error itself: kind, operand index, message and extra data.  */
  aarch64_operand_error detail;
  /* Next record in the singly-linked list.  */
  struct operand_error_record *next;
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of operand error records; the tail pointer allows
   the whole list to be spliced onto the free list in O(1).  */

struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
5158
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled by init_operand_error_report and
   remove_operand_error_record to avoid repeated heap allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5170
5171 /* Initialize the data structure that stores the operand mismatch
5172 information on assembling one line of the assembly code. */
5173 static void
5174 init_operand_error_report (void)
5175 {
5176 if (operand_error_report.head != NULL)
5177 {
5178 gas_assert (operand_error_report.tail != NULL);
5179 operand_error_report.tail->next = free_opnd_error_record_nodes;
5180 free_opnd_error_record_nodes = operand_error_report.head;
5181 operand_error_report.head = NULL;
5182 operand_error_report.tail = NULL;
5183 return;
5184 }
5185 gas_assert (operand_error_report.tail == NULL);
5186 }
5187
5188 /* Return TRUE if some operand error has been recorded during the
5189 parsing of the current assembly line using the opcode *OPCODE;
5190 otherwise return FALSE. */
5191 static inline bool
5192 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5193 {
5194 operand_error_record *record = operand_error_report.head;
5195 return record && record->opcode == opcode;
5196 }
5197
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  /* Otherwise RECORD (== the head) is already the record for this
     opcode; decide whether the new error should replace its detail.  */
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  record->detail = new_record->detail;
}
5249
5250 static inline void
5251 record_operand_error_info (const aarch64_opcode *opcode,
5252 aarch64_operand_error *error_info)
5253 {
5254 operand_error_record record;
5255 record.opcode = opcode;
5256 record.detail = *error_info;
5257 add_operand_error_record (&record);
5258 }
5259
5260 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5261 error message *ERROR, for operand IDX (count from 0). */
5262
5263 static void
5264 record_operand_error (const aarch64_opcode *opcode, int idx,
5265 enum aarch64_operand_error_kind kind,
5266 const char* error)
5267 {
5268 aarch64_operand_error info;
5269 memset(&info, 0, sizeof (info));
5270 info.index = idx;
5271 info.kind = kind;
5272 info.error = error;
5273 info.non_fatal = false;
5274 record_operand_error_info (opcode, &info);
5275 }
5276
5277 static void
5278 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5279 enum aarch64_operand_error_kind kind,
5280 const char* error, const int *extra_data)
5281 {
5282 aarch64_operand_error info;
5283 info.index = idx;
5284 info.kind = kind;
5285 info.error = error;
5286 info.data[0].i = extra_data[0];
5287 info.data[1].i = extra_data[1];
5288 info.data[2].i = extra_data[2];
5289 info.non_fatal = false;
5290 record_operand_error_info (opcode, &info);
5291 }
5292
5293 static void
5294 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5295 const char* error, int lower_bound,
5296 int upper_bound)
5297 {
5298 int data[3] = {lower_bound, upper_bound, 0};
5299 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5300 error, data);
5301 }
5302
/* Remove the operand error record for *OPCODE, returning the node to
   the free list.  A no-op if no record exists for *OPCODE.  */
static void ATTRIBUTE_UNUSED
remove_operand_error_record (const aarch64_opcode *opcode)
{
  if (opcode_has_operand_error_p (opcode))
    {
      /* The record for *OPCODE, if present, is always the list head.  */
      operand_error_record* record = operand_error_report.head;
      gas_assert (record != NULL && operand_error_report.tail != NULL);
      operand_error_report.head = record->next;
      record->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = record;
      if (operand_error_report.head == NULL)
	{
	  gas_assert (operand_error_report.tail == record);
	  operand_error_report.tail = NULL;
	}
    }
}
5321
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list; an empty
	 (all-NIL) sequence marks the end of the valid entries.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many qualifiers in this sequence agree with the ones
	 already assigned to *INSTR's operands.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Keep the first sequence with the highest match count.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5371
5372 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5373 corresponding operands in *INSTR. */
5374
5375 static inline void
5376 assign_qualifier_sequence (aarch64_inst *instr,
5377 const aarch64_opnd_qualifier_t *qualifiers)
5378 {
5379 int i = 0;
5380 int num_opnds = aarch64_num_of_operands (instr->opcode);
5381 gas_assert (num_opnds);
5382 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5383 instr->operands[i].qualifier = *qualifiers;
5384 }
5385
/* Callback used by aarch64_print_operand to apply STYLE to the
   disassembler output created from FMT and ARGS.  The STYLER object holds
   any required state.  Must return a pointer to a string (created from FMT
   and ARGS) that will continue to be valid until the complete disassembled
   instruction has been printed.

   We don't currently add any styling to the output of the disassembler as
   used within assembler error messages, and so STYLE is ignored here.  A
   new string is allocated on the obstack held within STYLER and returned
   to the caller.  */

static const char *aarch64_apply_style
	(struct aarch64_styler *styler,
	 enum disassembler_style style ATTRIBUTE_UNUSED,
	 const char *fmt, va_list args)
{
  int res;
  char *ptr;
  struct obstack *stack = (struct obstack *) styler->state;
  va_list ap;

  /* Calculate the required space.  vsnprintf consumes the va_list, so
     the sizing pass must work on a copy, leaving ARGS intact for the
     formatting pass below.  */
  va_copy (ap, args);
  res = vsnprintf (NULL, 0, fmt, ap);
  va_end (ap);
  gas_assert (res >= 0);

  /* Allocate space on the obstack and format the result.  */
  ptr = (char *) obstack_alloc (stack, res + 1);
  res = vsnprintf (ptr, (res + 1), fmt, args);
  gas_assert (res >= 0);

  return ptr;
}
5420
/* Print the operands described by OPNDS (interpreted against OPCODE's
   operand templates) into BUF, for the diagnosis purpose.  BUF must be
   large enough for the formatted operands; each operand is preceded by
   a space or ", " delimiter, and a trailing "\t// " comment may be
   appended.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Strings produced by aarch64_print_operand are allocated on this
     local obstack and freed wholesale at the end.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  obstack_free (&content, NULL);
}
5472
/* Send to stderr a string as information, prefixed with the current
   file name and line number (like as_bad/as_warn, but purely
   informational: does not affect the assembler's exit status).  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  file = as_where (&line);
  if (file)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
5496
/* See if the AARCH64_OPDE_SYNTAX_ERROR error described by DETAIL
   relates to registers or register lists.  If so, return a string that
   reports the error against "operand %d", otherwise return null.

   NOTE(review): the data[] fields appear to encode, respectively, the
   acceptable single-register error flags (data[0].i), the acceptable
   register-list flags plus SEF_IN_REGLIST (data[1].i), and the type of
   register actually found (data[2].i) — confirm against the code that
   records AARCH64_OPDE_SYNTAX_ERROR.  */

static const char *
get_reg_error_message (const aarch64_operand_error *detail)
{
  /* Handle the case where we found a register that was expected
     to be in a register list outside of a register list.  */
  if ((detail->data[1].i & detail->data[2].i) != 0
      && (detail->data[1].i & SEF_IN_REGLIST) == 0)
    return _("missing braces at operand %d");

  /* If some opcodes expected a register, and we found a register,
     complain about the difference.  */
  if (detail->data[2].i)
    {
      unsigned int expected = (detail->data[1].i & SEF_IN_REGLIST
			       ? detail->data[1].i & ~SEF_IN_REGLIST
			       : detail->data[0].i & ~SEF_DEFAULT_ERROR);
      const char *msg = get_reg_expected_msg (expected, detail->data[2].i);
      if (!msg)
	msg = N_("unexpected register type at operand %d");
      return msg;
    }

  /* Handle the case where we got to the point of trying to parse a
     register within a register list, but didn't find a known register.  */
  if (detail->data[1].i & SEF_IN_REGLIST)
    {
      unsigned int expected = detail->data[1].i & ~SEF_IN_REGLIST;
      const char *msg = get_reg_expected_msg (expected, 0);
      if (!msg)
	msg = _("invalid register list at operand %d");
      return msg;
    }

  /* Punt if register-related problems weren't the only errors.  */
  if (detail->data[0].i & SEF_DEFAULT_ERROR)
    return NULL;

  /* Handle the case where the only acceptable things are registers.  */
  if (detail->data[1].i == 0)
    {
      const char *msg = get_reg_expected_msg (detail->data[0].i, 0);
      if (!msg)
	msg = _("expected a register at operand %d");
      return msg;
    }

  /* Handle the case where the only acceptable things are register lists,
     and there was no opening '{'.  */
  if (detail->data[0].i == 0)
    return _("expected '{' at operand %d");

  return _("expected a register or register list at operand %d");
}
5554
/* Output one operand error record.

   RECORD describes one mismatch collected while trying the opcode
   templates for the assembly line STR.  Depending on RECORD's kind,
   emit a warning (non-fatal) or an error (fatal) describing it, and
   for qualifier mismatches optionally print suggested corrections.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* Operand code of the offending operand, or NIL when the error is not
     attached to a specific operand (idx < 0).  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  /* Non-fatal problems are reported as warnings so that assembly of the
     line can still succeed against another template.  */
  handler_t handler = detail->non_fatal ? as_warn : as_bad;
  const char *msg = detail->error;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
      /* Prefer a register-specific diagnosis when one is available;
	 otherwise fall through to the generic reporting below.  */
      if (!msg && idx >= 0)
	{
	  msg = get_reg_error_message (detail);
	  if (msg)
	    {
	      char *full_msg = xasprintf (msg, idx + 1);
	      handler (_("%s -- `%s'"), full_msg, str);
	      free (full_msg);
	      break;
	    }
	}
      /* Fall through.  */

    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (msg != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), msg, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     msg, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parsing is expected to succeed: the operands
	     were valid, only the qualifiers mismatched.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail again (same qualifier mismatch).  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_INVALID_REGNO:
      handler (_("%s%d-%s%d expected at operand %d -- `%s'"),
	       detail->data[0].s, detail->data[1].i,
	       detail->data[0].s, detail->data[2].i, idx + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 msg ? msg : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 msg ? msg : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0].i is a bitmask of acceptable register-list lengths;
	 bit N set means a list of N registers is acceptable.  */
      if (detail->data[0].i == (1 << 1))
	handler (_("expected a single-register list at operand %d -- `%s'"),
		 idx + 1, str);
      else if ((detail->data[0].i & -detail->data[0].i) == detail->data[0].i)
	/* Exactly one length bit is set.  */
	handler (_("expected a list of %d registers at operand %d -- `%s'"),
		 get_log2 (detail->data[0].i), idx + 1, str);
      else
	handler (_("invalid number of registers in the list"
		   " at operand %d -- `%s'"), idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5771
5772 /* Return true if the presence of error A against an instruction means
5773 that error B should not be reported. This is only used as a first pass,
5774 to pick the kind of error that we should report. */
5775
5776 static bool
5777 better_error_p (operand_error_record *a, operand_error_record *b)
5778 {
5779 /* For errors reported during parsing, prefer errors that relate to
5780 later operands, since that implies that the earlier operands were
5781 syntactically valid.
5782
5783 For example, if we see a register R instead of an immediate in
5784 operand N, we'll report that as a recoverable "immediate operand
5785 required" error. This is because there is often another opcode
5786 entry that accepts a register operand N, and any errors about R
5787 should be reported against the register forms of the instruction.
5788 But if no such register form exists, the recoverable error should
5789 still win over a syntax error against operand N-1.
5790
5791 For these purposes, count an error reported at the end of the
5792 assembly string as equivalent to an error reported against the
5793 final operand. This means that opcode entries that expect more
5794 operands win over "unexpected characters following instruction". */
5795 if (a->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR
5796 && b->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR)
5797 {
5798 int a_index = (a->detail.index < 0
5799 ? aarch64_num_of_operands (a->opcode) - 1
5800 : a->detail.index);
5801 int b_index = (b->detail.index < 0
5802 ? aarch64_num_of_operands (b->opcode) - 1
5803 : b->detail.index);
5804 if (a_index != b_index)
5805 return a_index > b_index;
5806 }
5807 return operand_error_higher_severity_p (a->detail.kind, b->detail.kind);
5808 }
5809
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      if (curr->detail.kind == AARCH64_OPDE_SYNTAX_ERROR)
	{
	  DEBUG_TRACE ("\t%s [%x, %x, %x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i, curr->detail.data[1].i,
		       curr->detail.data[2].i);
	}
      else if (curr->detail.kind == AARCH64_OPDE_REG_LIST)
	{
	  DEBUG_TRACE ("\t%s [%x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i);
	}
      else
	{
	  DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
	}
      if ((!non_fatal_only || curr->detail.non_fatal)
	  && (!record || better_error_p (curr, record)))
	record = curr;
    }

  kind = (record ? record->detail.kind : AARCH64_OPDE_NIL);
  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (!record || curr->detail.index > record->detail.index)
	record = curr;
      else if (curr->detail.index == record->detail.index
	       && !record->detail.error)
	{
	  if (curr->detail.error)
	    record = curr;
	  else if (kind == AARCH64_OPDE_SYNTAX_ERROR)
	    {
	      /* Merge the register-type flags from templates that failed
		 at the same operand, so the combined report covers all
		 the registers any of them would have accepted.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      record->detail.data[1].i |= curr->detail.data[1].i;
	      record->detail.data[2].i |= curr->detail.data[2].i;
	      DEBUG_TRACE ("\t--> %s [%x, %x, %x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i, curr->detail.data[1].i,
			   curr->detail.data[2].i);
	    }
	  else if (kind == AARCH64_OPDE_REG_LIST)
	    {
	      /* Likewise merge the acceptable register-list lengths.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      DEBUG_TRACE ("\t--> %s [%x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i);
	    }
	  /* Pick the variant with the closest match.  */
	  else if (kind == AARCH64_OPDE_INVALID_VARIANT
		   && record->detail.data[0].i > curr->detail.data[0].i)
	    record = curr;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (record);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5943 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Emit the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
5954
/* Read a little-endian AARCH64 instruction from BUF.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn;

  /* Accumulate most-significant byte first.  */
  insn = (uint32_t) p[3];
  insn = (insn << 8) | p[2];
  insn = (insn << 8) | p[1];
  insn = (insn << 8) | p[0];
  return insn;
}
5964
/* Emit the assembled instruction in the global INST to the current frag.
   NEW_INST, if non-null, is attached to any relocation fix-up created,
   presumably so later fix-up processing can re-encode the instruction
   (see the tc_fix_data.inst assignment below).  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve INSN_SIZE bytes of output.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  /* Instructions are always written little-endian.  */
  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand and flags so the backend
	     can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
5998
/* Link together opcodes of the same name.  */

struct templates
{
  /* One opcode entry sharing this mnemonic.  */
  const aarch64_opcode *opcode;
  /* Next entry with the same mnemonic, or NULL at the end of the chain.  */
  struct templates *next;
};

typedef struct templates templates;
6008
6009 static templates *
6010 lookup_mnemonic (const char *start, int len)
6011 {
6012 templates *templ = NULL;
6013
6014 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
6015 return templ;
6016 }
6017
6018 /* Subroutine of md_assemble, responsible for looking up the primary
6019 opcode from the mnemonic the user wrote. BASE points to the beginning
6020 of the mnemonic, DOT points to the first '.' within the mnemonic
6021 (if any) and END points to the end of the mnemonic. */
6022
6023 static templates *
6024 opcode_lookup (char *base, char *dot, char *end)
6025 {
6026 const aarch64_cond *cond;
6027 char condname[16];
6028 int len;
6029
6030 if (dot == end)
6031 return 0;
6032
6033 inst.cond = COND_ALWAYS;
6034
6035 /* Handle a possible condition. */
6036 if (dot)
6037 {
6038 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
6039 if (!cond)
6040 return 0;
6041 inst.cond = cond->value;
6042 len = dot - base;
6043 }
6044 else
6045 len = end - base;
6046
6047 if (inst.cond == COND_ALWAYS)
6048 {
6049 /* Look for unaffixed mnemonic. */
6050 return lookup_mnemonic (base, len);
6051 }
6052 else if (len <= 13)
6053 {
6054 /* append ".c" to mnemonic if conditional */
6055 memcpy (condname, base, len);
6056 memcpy (condname + len, ".c", 2);
6057 base = condname;
6058 len += 2;
6059 return lookup_mnemonic (base, len);
6060 }
6061
6062 return NULL;
6063 }
6064
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.

   The default value to substitute is taken from the opcode entry; which
   field of *OPERAND it lands in depends on the operand's type.  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element operands: the default is the register number of
       the reglane.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* Omitted "MUL #n" defaults to a multiplier of 1.  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate leaves no relocation pending.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* The default indexes into the barrier-option table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    /* The default indexes into the hint-option table.  */
    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6163
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   Validates that the relocation attached to the current MOVZ/MOVN/MOVK
   instruction is legal for the opcode and register width, and derives
   the implied shift amount (0/16/32/48) from the relocation's group.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not overwrite bits with a sign-extending or PC-relative
     group value; reject those relocation types outright.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Map the relocation's group number (G0/G1/G2/G3) to the LSL amount
     encoded in the instruction.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Groups 2 and 3 address bits beyond a 32-bit register.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6265
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The pseudo type is resolved to a size-specific LDST8/16/32/64/128
   relocation based on the qualifier of the transferred register
   (operand 0), with TLS variants selected by the row of the table
   below.  Returns BFD_RELOC_AARCH64_NONE on a size mismatch.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: one per pseudo reloc type (in the same order as the pseudo
     types are defined); columns: log2 of the access size (0..4).  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* Infer the address-operand qualifier from operand 0 when the parser
     left it unset.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_log2 (aarch64_get_qualifier_esize (opd1_qlf));

  /* TLS rows have no 128-bit entry; cap the accepted size accordingly.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6353
/* Check whether a register list REGINFO is valid.  The registers must be
   numbered in increasing order (modulo 32), in increments of one or two.

   If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
   increments of two.

   REGINFO packs the register count minus one into its low two bits,
   followed by one 5-bit register number per list element.

   Return FALSE if such a register list is invalid, otherwise return TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t nb_regs = 1 + (reginfo & 0x3);
  uint32_t step = accept_alternate ? 2 : 1;
  uint32_t i;
  uint32_t expected;

  /* The first register number establishes the sequence.  */
  reginfo >>= 2;
  expected = reginfo & 0x1f;

  for (i = 1; i < nb_regs; ++i)
    {
      reginfo >>= 5;
      expected = (expected + step) & 0x1f;
      if ((reginfo & 0x1f) != expected)
	return false;
    }

  return true;
}
6384
6385 /* Generic instruction operand parser. This does no encoding and no
6386 semantic validation; it merely squirrels values away in the inst
6387 structure. Returns TRUE or FALSE depending on whether the
6388 specified grammar matched. */
6389
6390 static bool
6391 parse_operands (char *str, const aarch64_opcode *opcode)
6392 {
6393 int i;
6394 char *backtrack_pos = 0;
6395 const enum aarch64_opnd *operands = opcode->operands;
6396 aarch64_reg_type imm_reg_type;
6397
6398 clear_error ();
6399 skip_whitespace (str);
6400
6401 if (AARCH64_CPU_HAS_ANY_FEATURES (*opcode->avariant,
6402 AARCH64_FEATURE_SVE
6403 | AARCH64_FEATURE_SVE2))
6404 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6405 else
6406 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6407
6408 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6409 {
6410 int64_t val;
6411 const reg_entry *reg;
6412 int comma_skipped_p = 0;
6413 struct vector_type_el vectype;
6414 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6415 aarch64_opnd_info *info = &inst.base.operands[i];
6416 aarch64_reg_type reg_type;
6417
6418 DEBUG_TRACE ("parse operand %d", i);
6419
6420 /* Assign the operand code. */
6421 info->type = operands[i];
6422
6423 if (optional_operand_p (opcode, i))
6424 {
6425 /* Remember where we are in case we need to backtrack. */
6426 gas_assert (!backtrack_pos);
6427 backtrack_pos = str;
6428 }
6429
6430 /* Expect comma between operands; the backtrack mechanism will take
6431 care of cases of omitted optional operand. */
6432 if (i > 0 && ! skip_past_char (&str, ','))
6433 {
6434 set_syntax_error (_("comma expected between operands"));
6435 goto failure;
6436 }
6437 else
6438 comma_skipped_p = 1;
6439
6440 switch (operands[i])
6441 {
6442 case AARCH64_OPND_Rd:
6443 case AARCH64_OPND_Rn:
6444 case AARCH64_OPND_Rm:
6445 case AARCH64_OPND_Rt:
6446 case AARCH64_OPND_Rt2:
6447 case AARCH64_OPND_Rs:
6448 case AARCH64_OPND_Ra:
6449 case AARCH64_OPND_Rt_LS64:
6450 case AARCH64_OPND_Rt_SYS:
6451 case AARCH64_OPND_PAIRREG:
6452 case AARCH64_OPND_SVE_Rm:
6453 po_int_fp_reg_or_fail (REG_TYPE_R_Z);
6454
6455 /* In LS64 load/store instructions Rt register number must be even
6456 and <=22. */
6457 if (operands[i] == AARCH64_OPND_Rt_LS64)
6458 {
6459 /* We've already checked if this is valid register.
6460 This will check if register number (Rt) is not undefined for LS64
6461 instructions:
6462 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6463 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6464 {
6465 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6466 goto failure;
6467 }
6468 }
6469 break;
6470
6471 case AARCH64_OPND_Rd_SP:
6472 case AARCH64_OPND_Rn_SP:
6473 case AARCH64_OPND_Rt_SP:
6474 case AARCH64_OPND_SVE_Rn_SP:
6475 case AARCH64_OPND_Rm_SP:
6476 po_int_fp_reg_or_fail (REG_TYPE_R_SP);
6477 break;
6478
6479 case AARCH64_OPND_Rm_EXT:
6480 case AARCH64_OPND_Rm_SFT:
6481 po_misc_or_fail (parse_shifter_operand
6482 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6483 ? SHIFTED_ARITH_IMM
6484 : SHIFTED_LOGIC_IMM)));
6485 if (!info->shifter.operator_present)
6486 {
6487 /* Default to LSL if not present. Libopcodes prefers shifter
6488 kind to be explicit. */
6489 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6490 info->shifter.kind = AARCH64_MOD_LSL;
6491 /* For Rm_EXT, libopcodes will carry out further check on whether
6492 or not stack pointer is used in the instruction (Recall that
6493 "the extend operator is not optional unless at least one of
6494 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6495 }
6496 break;
6497
6498 case AARCH64_OPND_Fd:
6499 case AARCH64_OPND_Fn:
6500 case AARCH64_OPND_Fm:
6501 case AARCH64_OPND_Fa:
6502 case AARCH64_OPND_Ft:
6503 case AARCH64_OPND_Ft2:
6504 case AARCH64_OPND_Sd:
6505 case AARCH64_OPND_Sn:
6506 case AARCH64_OPND_Sm:
6507 case AARCH64_OPND_SVE_VZn:
6508 case AARCH64_OPND_SVE_Vd:
6509 case AARCH64_OPND_SVE_Vm:
6510 case AARCH64_OPND_SVE_Vn:
6511 po_int_fp_reg_or_fail (REG_TYPE_BHSDQ);
6512 break;
6513
6514 case AARCH64_OPND_SVE_Pd:
6515 case AARCH64_OPND_SVE_Pg3:
6516 case AARCH64_OPND_SVE_Pg4_5:
6517 case AARCH64_OPND_SVE_Pg4_10:
6518 case AARCH64_OPND_SVE_Pg4_16:
6519 case AARCH64_OPND_SVE_Pm:
6520 case AARCH64_OPND_SVE_Pn:
6521 case AARCH64_OPND_SVE_Pt:
6522 case AARCH64_OPND_SME_Pm:
6523 reg_type = REG_TYPE_PN;
6524 goto vector_reg;
6525
6526 case AARCH64_OPND_SVE_Za_5:
6527 case AARCH64_OPND_SVE_Za_16:
6528 case AARCH64_OPND_SVE_Zd:
6529 case AARCH64_OPND_SVE_Zm_5:
6530 case AARCH64_OPND_SVE_Zm_16:
6531 case AARCH64_OPND_SVE_Zn:
6532 case AARCH64_OPND_SVE_Zt:
6533 reg_type = REG_TYPE_ZN;
6534 goto vector_reg;
6535
6536 case AARCH64_OPND_Va:
6537 case AARCH64_OPND_Vd:
6538 case AARCH64_OPND_Vn:
6539 case AARCH64_OPND_Vm:
6540 reg_type = REG_TYPE_VN;
6541 vector_reg:
6542 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6543 if (!reg)
6544 goto failure;
6545 if (vectype.defined & NTA_HASINDEX)
6546 goto failure;
6547
6548 info->reg.regno = reg->number;
6549 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6550 && vectype.type == NT_invtype)
6551 /* Unqualified Pn and Zn registers are allowed in certain
6552 contexts. Rely on F_STRICT qualifier checking to catch
6553 invalid uses. */
6554 info->qualifier = AARCH64_OPND_QLF_NIL;
6555 else
6556 {
6557 info->qualifier = vectype_to_qualifier (&vectype);
6558 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6559 goto failure;
6560 }
6561 break;
6562
6563 case AARCH64_OPND_VdD1:
6564 case AARCH64_OPND_VnD1:
6565 reg = aarch64_reg_parse (&str, REG_TYPE_VN, &vectype);
6566 if (!reg)
6567 goto failure;
6568 if (vectype.type != NT_d || vectype.index != 1)
6569 {
6570 set_fatal_syntax_error
6571 (_("the top half of a 128-bit FP/SIMD register is expected"));
6572 goto failure;
6573 }
6574 info->reg.regno = reg->number;
6575 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6576 here; it is correct for the purpose of encoding/decoding since
6577 only the register number is explicitly encoded in the related
6578 instructions, although this appears a bit hacky. */
6579 info->qualifier = AARCH64_OPND_QLF_S_D;
6580 break;
6581
6582 case AARCH64_OPND_SVE_Zm3_INDEX:
6583 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6584 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6585 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6586 case AARCH64_OPND_SVE_Zm4_INDEX:
6587 case AARCH64_OPND_SVE_Zn_INDEX:
6588 reg_type = REG_TYPE_ZN;
6589 goto vector_reg_index;
6590
6591 case AARCH64_OPND_Ed:
6592 case AARCH64_OPND_En:
6593 case AARCH64_OPND_Em:
6594 case AARCH64_OPND_Em16:
6595 case AARCH64_OPND_SM3_IMM2:
6596 reg_type = REG_TYPE_VN;
6597 vector_reg_index:
6598 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6599 if (!reg)
6600 goto failure;
6601 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6602 goto failure;
6603
6604 info->reglane.regno = reg->number;
6605 info->reglane.index = vectype.index;
6606 info->qualifier = vectype_to_qualifier (&vectype);
6607 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6608 goto failure;
6609 break;
6610
6611 case AARCH64_OPND_SVE_ZnxN:
6612 case AARCH64_OPND_SVE_ZtxN:
6613 reg_type = REG_TYPE_ZN;
6614 goto vector_reg_list;
6615
6616 case AARCH64_OPND_LVn:
6617 case AARCH64_OPND_LVt:
6618 case AARCH64_OPND_LVt_AL:
6619 case AARCH64_OPND_LEt:
6620 reg_type = REG_TYPE_VN;
6621 vector_reg_list:
6622 if (reg_type == REG_TYPE_ZN
6623 && get_opcode_dependent_value (opcode) == 1
6624 && *str != '{')
6625 {
6626 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6627 if (!reg)
6628 goto failure;
6629 info->reglist.first_regno = reg->number;
6630 info->reglist.num_regs = 1;
6631 }
6632 else
6633 {
6634 val = parse_vector_reg_list (&str, reg_type, &vectype);
6635 if (val == PARSE_FAIL)
6636 goto failure;
6637
6638 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6639 {
6640 set_fatal_syntax_error (_("invalid register list"));
6641 goto failure;
6642 }
6643
6644 if (vectype.width != 0 && *str != ',')
6645 {
6646 set_fatal_syntax_error
6647 (_("expected element type rather than vector type"));
6648 goto failure;
6649 }
6650
6651 info->reglist.first_regno = (val >> 2) & 0x1f;
6652 info->reglist.num_regs = (val & 0x3) + 1;
6653 }
6654 if (operands[i] == AARCH64_OPND_LEt)
6655 {
6656 if (!(vectype.defined & NTA_HASINDEX))
6657 goto failure;
6658 info->reglist.has_index = 1;
6659 info->reglist.index = vectype.index;
6660 }
6661 else
6662 {
6663 if (vectype.defined & NTA_HASINDEX)
6664 goto failure;
6665 if (!(vectype.defined & NTA_HASTYPE))
6666 {
6667 if (reg_type == REG_TYPE_ZN)
6668 set_fatal_syntax_error (_("missing type suffix"));
6669 goto failure;
6670 }
6671 }
6672 info->qualifier = vectype_to_qualifier (&vectype);
6673 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6674 goto failure;
6675 break;
6676
6677 case AARCH64_OPND_CRn:
6678 case AARCH64_OPND_CRm:
6679 {
6680 char prefix = *(str++);
6681 if (prefix != 'c' && prefix != 'C')
6682 goto failure;
6683
6684 po_imm_nc_or_fail ();
6685 if (val > 15)
6686 {
6687 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6688 goto failure;
6689 }
6690 info->qualifier = AARCH64_OPND_QLF_CR;
6691 info->imm.value = val;
6692 break;
6693 }
6694
6695 case AARCH64_OPND_SHLL_IMM:
6696 case AARCH64_OPND_IMM_VLSR:
6697 po_imm_or_fail (1, 64);
6698 info->imm.value = val;
6699 break;
6700
6701 case AARCH64_OPND_CCMP_IMM:
6702 case AARCH64_OPND_SIMM5:
6703 case AARCH64_OPND_FBITS:
6704 case AARCH64_OPND_TME_UIMM16:
6705 case AARCH64_OPND_UIMM4:
6706 case AARCH64_OPND_UIMM4_ADDG:
6707 case AARCH64_OPND_UIMM10:
6708 case AARCH64_OPND_UIMM3_OP1:
6709 case AARCH64_OPND_UIMM3_OP2:
6710 case AARCH64_OPND_IMM_VLSL:
6711 case AARCH64_OPND_IMM:
6712 case AARCH64_OPND_IMM_2:
6713 case AARCH64_OPND_WIDTH:
6714 case AARCH64_OPND_SVE_INV_LIMM:
6715 case AARCH64_OPND_SVE_LIMM:
6716 case AARCH64_OPND_SVE_LIMM_MOV:
6717 case AARCH64_OPND_SVE_SHLIMM_PRED:
6718 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6719 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6720 case AARCH64_OPND_SVE_SHRIMM_PRED:
6721 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6722 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6723 case AARCH64_OPND_SVE_SIMM5:
6724 case AARCH64_OPND_SVE_SIMM5B:
6725 case AARCH64_OPND_SVE_SIMM6:
6726 case AARCH64_OPND_SVE_SIMM8:
6727 case AARCH64_OPND_SVE_UIMM3:
6728 case AARCH64_OPND_SVE_UIMM7:
6729 case AARCH64_OPND_SVE_UIMM8:
6730 case AARCH64_OPND_SVE_UIMM8_53:
6731 case AARCH64_OPND_IMM_ROT1:
6732 case AARCH64_OPND_IMM_ROT2:
6733 case AARCH64_OPND_IMM_ROT3:
6734 case AARCH64_OPND_SVE_IMM_ROT1:
6735 case AARCH64_OPND_SVE_IMM_ROT2:
6736 case AARCH64_OPND_SVE_IMM_ROT3:
6737 case AARCH64_OPND_CSSC_SIMM8:
6738 case AARCH64_OPND_CSSC_UIMM8:
6739 po_imm_nc_or_fail ();
6740 info->imm.value = val;
6741 break;
6742
6743 case AARCH64_OPND_SVE_AIMM:
6744 case AARCH64_OPND_SVE_ASIMM:
6745 po_imm_nc_or_fail ();
6746 info->imm.value = val;
6747 skip_whitespace (str);
6748 if (skip_past_comma (&str))
6749 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6750 else
6751 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6752 break;
6753
6754 case AARCH64_OPND_SVE_PATTERN:
6755 po_enum_or_fail (aarch64_sve_pattern_array);
6756 info->imm.value = val;
6757 break;
6758
6759 case AARCH64_OPND_SVE_PATTERN_SCALED:
6760 po_enum_or_fail (aarch64_sve_pattern_array);
6761 info->imm.value = val;
6762 if (skip_past_comma (&str)
6763 && !parse_shift (&str, info, SHIFTED_MUL))
6764 goto failure;
6765 if (!info->shifter.operator_present)
6766 {
6767 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6768 info->shifter.kind = AARCH64_MOD_MUL;
6769 info->shifter.amount = 1;
6770 }
6771 break;
6772
6773 case AARCH64_OPND_SVE_PRFOP:
6774 po_enum_or_fail (aarch64_sve_prfop_array);
6775 info->imm.value = val;
6776 break;
6777
6778 case AARCH64_OPND_UIMM7:
6779 po_imm_or_fail (0, 127);
6780 info->imm.value = val;
6781 break;
6782
6783 case AARCH64_OPND_IDX:
6784 case AARCH64_OPND_MASK:
6785 case AARCH64_OPND_BIT_NUM:
6786 case AARCH64_OPND_IMMR:
6787 case AARCH64_OPND_IMMS:
6788 po_imm_or_fail (0, 63);
6789 info->imm.value = val;
6790 break;
6791
6792 case AARCH64_OPND_IMM0:
6793 po_imm_nc_or_fail ();
6794 if (val != 0)
6795 {
6796 set_fatal_syntax_error (_("immediate zero expected"));
6797 goto failure;
6798 }
6799 info->imm.value = 0;
6800 break;
6801
6802 case AARCH64_OPND_FPIMM0:
6803 {
6804 int qfloat;
6805 bool res1 = false, res2 = false;
6806 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6807 it is probably not worth the effort to support it. */
6808 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6809 imm_reg_type))
6810 && (error_p ()
6811 || !(res2 = parse_constant_immediate (&str, &val,
6812 imm_reg_type))))
6813 goto failure;
6814 if ((res1 && qfloat == 0) || (res2 && val == 0))
6815 {
6816 info->imm.value = 0;
6817 info->imm.is_fp = 1;
6818 break;
6819 }
6820 set_fatal_syntax_error (_("immediate zero expected"));
6821 goto failure;
6822 }
6823
6824 case AARCH64_OPND_IMM_MOV:
6825 {
6826 char *saved = str;
6827 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6828 reg_name_p (str, REG_TYPE_VN))
6829 goto failure;
6830 str = saved;
6831 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6832 GE_OPT_PREFIX, REJECT_ABSENT));
6833 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6834 later. fix_mov_imm_insn will try to determine a machine
6835 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6836 message if the immediate cannot be moved by a single
6837 instruction. */
6838 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6839 inst.base.operands[i].skip = 1;
6840 }
6841 break;
6842
6843 case AARCH64_OPND_SIMD_IMM:
6844 case AARCH64_OPND_SIMD_IMM_SFT:
6845 if (! parse_big_immediate (&str, &val, imm_reg_type))
6846 goto failure;
6847 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6848 /* addr_off_p */ 0,
6849 /* need_libopcodes_p */ 1,
6850 /* skip_p */ 1);
6851 /* Parse shift.
6852 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6853 shift, we don't check it here; we leave the checking to
6854 the libopcodes (operand_general_constraint_met_p). By
6855 doing this, we achieve better diagnostics. */
6856 if (skip_past_comma (&str)
6857 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6858 goto failure;
6859 if (!info->shifter.operator_present
6860 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6861 {
6862 /* Default to LSL if not present. Libopcodes prefers shifter
6863 kind to be explicit. */
6864 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6865 info->shifter.kind = AARCH64_MOD_LSL;
6866 }
6867 break;
6868
6869 case AARCH64_OPND_FPIMM:
6870 case AARCH64_OPND_SIMD_FPIMM:
6871 case AARCH64_OPND_SVE_FPIMM8:
6872 {
6873 int qfloat;
6874 bool dp_p;
6875
6876 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6877 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6878 || !aarch64_imm_float_p (qfloat))
6879 {
6880 if (!error_p ())
6881 set_fatal_syntax_error (_("invalid floating-point"
6882 " constant"));
6883 goto failure;
6884 }
6885 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6886 inst.base.operands[i].imm.is_fp = 1;
6887 }
6888 break;
6889
6890 case AARCH64_OPND_SVE_I1_HALF_ONE:
6891 case AARCH64_OPND_SVE_I1_HALF_TWO:
6892 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6893 {
6894 int qfloat;
6895 bool dp_p;
6896
6897 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6898 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6899 {
6900 if (!error_p ())
6901 set_fatal_syntax_error (_("invalid floating-point"
6902 " constant"));
6903 goto failure;
6904 }
6905 inst.base.operands[i].imm.value = qfloat;
6906 inst.base.operands[i].imm.is_fp = 1;
6907 }
6908 break;
6909
6910 case AARCH64_OPND_LIMM:
6911 po_misc_or_fail (parse_shifter_operand (&str, info,
6912 SHIFTED_LOGIC_IMM));
6913 if (info->shifter.operator_present)
6914 {
6915 set_fatal_syntax_error
6916 (_("shift not allowed for bitmask immediate"));
6917 goto failure;
6918 }
6919 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6920 /* addr_off_p */ 0,
6921 /* need_libopcodes_p */ 1,
6922 /* skip_p */ 1);
6923 break;
6924
6925 case AARCH64_OPND_AIMM:
6926 if (opcode->op == OP_ADD)
6927 /* ADD may have relocation types. */
6928 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6929 SHIFTED_ARITH_IMM));
6930 else
6931 po_misc_or_fail (parse_shifter_operand (&str, info,
6932 SHIFTED_ARITH_IMM));
6933 switch (inst.reloc.type)
6934 {
6935 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6936 info->shifter.amount = 12;
6937 break;
6938 case BFD_RELOC_UNUSED:
6939 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6940 if (info->shifter.kind != AARCH64_MOD_NONE)
6941 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6942 inst.reloc.pc_rel = 0;
6943 break;
6944 default:
6945 break;
6946 }
6947 info->imm.value = 0;
6948 if (!info->shifter.operator_present)
6949 {
6950 /* Default to LSL if not present. Libopcodes prefers shifter
6951 kind to be explicit. */
6952 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6953 info->shifter.kind = AARCH64_MOD_LSL;
6954 }
6955 break;
6956
6957 case AARCH64_OPND_HALF:
6958 {
6959 /* #<imm16> or relocation. */
6960 int internal_fixup_p;
6961 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6962 if (internal_fixup_p)
6963 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6964 skip_whitespace (str);
6965 if (skip_past_comma (&str))
6966 {
6967 /* {, LSL #<shift>} */
6968 if (! aarch64_gas_internal_fixup_p ())
6969 {
6970 set_fatal_syntax_error (_("can't mix relocation modifier "
6971 "with explicit shift"));
6972 goto failure;
6973 }
6974 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6975 }
6976 else
6977 inst.base.operands[i].shifter.amount = 0;
6978 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6979 inst.base.operands[i].imm.value = 0;
6980 if (! process_movw_reloc_info ())
6981 goto failure;
6982 }
6983 break;
6984
6985 case AARCH64_OPND_EXCEPTION:
6986 case AARCH64_OPND_UNDEFINED:
6987 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6988 imm_reg_type));
6989 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6990 /* addr_off_p */ 0,
6991 /* need_libopcodes_p */ 0,
6992 /* skip_p */ 1);
6993 break;
6994
6995 case AARCH64_OPND_NZCV:
6996 {
6997 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6998 if (nzcv != NULL)
6999 {
7000 str += 4;
7001 info->imm.value = nzcv->value;
7002 break;
7003 }
7004 po_imm_or_fail (0, 15);
7005 info->imm.value = val;
7006 }
7007 break;
7008
7009 case AARCH64_OPND_COND:
7010 case AARCH64_OPND_COND1:
7011 {
7012 char *start = str;
7013 do
7014 str++;
7015 while (ISALPHA (*str));
7016 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
7017 if (info->cond == NULL)
7018 {
7019 set_syntax_error (_("invalid condition"));
7020 goto failure;
7021 }
7022 else if (operands[i] == AARCH64_OPND_COND1
7023 && (info->cond->value & 0xe) == 0xe)
7024 {
7025 /* Do not allow AL or NV. */
7026 set_default_error ();
7027 goto failure;
7028 }
7029 }
7030 break;
7031
7032 case AARCH64_OPND_ADDR_ADRP:
7033 po_misc_or_fail (parse_adrp (&str));
7034 /* Clear the value as operand needs to be relocated. */
7035 info->imm.value = 0;
7036 break;
7037
7038 case AARCH64_OPND_ADDR_PCREL14:
7039 case AARCH64_OPND_ADDR_PCREL19:
7040 case AARCH64_OPND_ADDR_PCREL21:
7041 case AARCH64_OPND_ADDR_PCREL26:
7042 po_misc_or_fail (parse_address (&str, info));
7043 if (!info->addr.pcrel)
7044 {
7045 set_syntax_error (_("invalid pc-relative address"));
7046 goto failure;
7047 }
7048 if (inst.gen_lit_pool
7049 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
7050 {
7051 /* Only permit "=value" in the literal load instructions.
7052 The literal will be generated by programmer_friendly_fixup. */
7053 set_syntax_error (_("invalid use of \"=immediate\""));
7054 goto failure;
7055 }
7056 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
7057 {
7058 set_syntax_error (_("unrecognized relocation suffix"));
7059 goto failure;
7060 }
7061 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
7062 {
7063 info->imm.value = inst.reloc.exp.X_add_number;
7064 inst.reloc.type = BFD_RELOC_UNUSED;
7065 }
7066 else
7067 {
7068 info->imm.value = 0;
7069 if (inst.reloc.type == BFD_RELOC_UNUSED)
7070 switch (opcode->iclass)
7071 {
7072 case compbranch:
7073 case condbranch:
7074 /* e.g. CBZ or B.COND */
7075 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7076 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
7077 break;
7078 case testbranch:
7079 /* e.g. TBZ */
7080 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
7081 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
7082 break;
7083 case branch_imm:
7084 /* e.g. B or BL */
7085 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
7086 inst.reloc.type =
7087 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
7088 : BFD_RELOC_AARCH64_JUMP26;
7089 break;
7090 case loadlit:
7091 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7092 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
7093 break;
7094 case pcreladdr:
7095 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
7096 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
7097 break;
7098 default:
7099 gas_assert (0);
7100 abort ();
7101 }
7102 inst.reloc.pc_rel = 1;
7103 }
7104 break;
7105
7106 case AARCH64_OPND_ADDR_SIMPLE:
7107 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7108 {
7109 /* [<Xn|SP>{, #<simm>}] */
7110 char *start = str;
7111 /* First use the normal address-parsing routines, to get
7112 the usual syntax errors. */
7113 po_misc_or_fail (parse_address (&str, info));
7114 if (info->addr.pcrel || info->addr.offset.is_reg
7115 || !info->addr.preind || info->addr.postind
7116 || info->addr.writeback)
7117 {
7118 set_syntax_error (_("invalid addressing mode"));
7119 goto failure;
7120 }
7121
7122 /* Then retry, matching the specific syntax of these addresses. */
7123 str = start;
7124 po_char_or_fail ('[');
7125 po_reg_or_fail (REG_TYPE_R64_SP);
7126 /* Accept optional ", #0". */
7127 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7128 && skip_past_char (&str, ','))
7129 {
7130 skip_past_char (&str, '#');
7131 if (! skip_past_char (&str, '0'))
7132 {
7133 set_fatal_syntax_error
7134 (_("the optional immediate offset can only be 0"));
7135 goto failure;
7136 }
7137 }
7138 po_char_or_fail (']');
7139 break;
7140 }
7141
7142 case AARCH64_OPND_ADDR_REGOFF:
7143 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7144 po_misc_or_fail (parse_address (&str, info));
7145 regoff_addr:
7146 if (info->addr.pcrel || !info->addr.offset.is_reg
7147 || !info->addr.preind || info->addr.postind
7148 || info->addr.writeback)
7149 {
7150 set_syntax_error (_("invalid addressing mode"));
7151 goto failure;
7152 }
7153 if (!info->shifter.operator_present)
7154 {
7155 /* Default to LSL if not present. Libopcodes prefers shifter
7156 kind to be explicit. */
7157 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7158 info->shifter.kind = AARCH64_MOD_LSL;
7159 }
7160 /* Qualifier to be deduced by libopcodes. */
7161 break;
7162
7163 case AARCH64_OPND_ADDR_SIMM7:
7164 po_misc_or_fail (parse_address (&str, info));
7165 if (info->addr.pcrel || info->addr.offset.is_reg
7166 || (!info->addr.preind && !info->addr.postind))
7167 {
7168 set_syntax_error (_("invalid addressing mode"));
7169 goto failure;
7170 }
7171 if (inst.reloc.type != BFD_RELOC_UNUSED)
7172 {
7173 set_syntax_error (_("relocation not allowed"));
7174 goto failure;
7175 }
7176 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7177 /* addr_off_p */ 1,
7178 /* need_libopcodes_p */ 1,
7179 /* skip_p */ 0);
7180 break;
7181
7182 case AARCH64_OPND_ADDR_SIMM9:
7183 case AARCH64_OPND_ADDR_SIMM9_2:
7184 case AARCH64_OPND_ADDR_SIMM11:
7185 case AARCH64_OPND_ADDR_SIMM13:
7186 po_misc_or_fail (parse_address (&str, info));
7187 if (info->addr.pcrel || info->addr.offset.is_reg
7188 || (!info->addr.preind && !info->addr.postind)
7189 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7190 && info->addr.writeback))
7191 {
7192 set_syntax_error (_("invalid addressing mode"));
7193 goto failure;
7194 }
7195 if (inst.reloc.type != BFD_RELOC_UNUSED)
7196 {
7197 set_syntax_error (_("relocation not allowed"));
7198 goto failure;
7199 }
7200 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7201 /* addr_off_p */ 1,
7202 /* need_libopcodes_p */ 1,
7203 /* skip_p */ 0);
7204 break;
7205
7206 case AARCH64_OPND_ADDR_SIMM10:
7207 case AARCH64_OPND_ADDR_OFFSET:
7208 po_misc_or_fail (parse_address (&str, info));
7209 if (info->addr.pcrel || info->addr.offset.is_reg
7210 || !info->addr.preind || info->addr.postind)
7211 {
7212 set_syntax_error (_("invalid addressing mode"));
7213 goto failure;
7214 }
7215 if (inst.reloc.type != BFD_RELOC_UNUSED)
7216 {
7217 set_syntax_error (_("relocation not allowed"));
7218 goto failure;
7219 }
7220 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7221 /* addr_off_p */ 1,
7222 /* need_libopcodes_p */ 1,
7223 /* skip_p */ 0);
7224 break;
7225
7226 case AARCH64_OPND_ADDR_UIMM12:
7227 po_misc_or_fail (parse_address (&str, info));
7228 if (info->addr.pcrel || info->addr.offset.is_reg
7229 || !info->addr.preind || info->addr.writeback)
7230 {
7231 set_syntax_error (_("invalid addressing mode"));
7232 goto failure;
7233 }
7234 if (inst.reloc.type == BFD_RELOC_UNUSED)
7235 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7236 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7237 || (inst.reloc.type
7238 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7239 || (inst.reloc.type
7240 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7241 || (inst.reloc.type
7242 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7243 || (inst.reloc.type
7244 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7245 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7246 /* Leave qualifier to be determined by libopcodes. */
7247 break;
7248
7249 case AARCH64_OPND_SIMD_ADDR_POST:
7250 /* [<Xn|SP>], <Xm|#<amount>> */
7251 po_misc_or_fail (parse_address (&str, info));
7252 if (!info->addr.postind || !info->addr.writeback)
7253 {
7254 set_syntax_error (_("invalid addressing mode"));
7255 goto failure;
7256 }
7257 if (!info->addr.offset.is_reg)
7258 {
7259 if (inst.reloc.exp.X_op == O_constant)
7260 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7261 else
7262 {
7263 set_fatal_syntax_error
7264 (_("writeback value must be an immediate constant"));
7265 goto failure;
7266 }
7267 }
7268 /* No qualifier. */
7269 break;
7270
7271 case AARCH64_OPND_SME_SM_ZA:
7272 /* { SM | ZA } */
7273 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7274 {
7275 set_syntax_error (_("unknown or missing PSTATE field name"));
7276 goto failure;
7277 }
7278 info->reg.regno = val;
7279 break;
7280
7281 case AARCH64_OPND_SME_PnT_Wm_imm:
7282 if (!parse_dual_indexed_reg (&str, REG_TYPE_PN,
7283 &info->indexed_za, &qualifier, 0))
7284 goto failure;
7285 info->qualifier = qualifier;
7286 break;
7287
7288 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7289 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7290 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7291 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7292 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7293 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7294 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7295 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7296 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7297 case AARCH64_OPND_SVE_ADDR_RI_U6:
7298 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7299 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7300 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7301 /* [X<n>{, #imm, MUL VL}]
7302 [X<n>{, #imm}]
7303 but recognizing SVE registers. */
7304 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7305 &offset_qualifier));
7306 if (base_qualifier != AARCH64_OPND_QLF_X)
7307 {
7308 set_syntax_error (_("invalid addressing mode"));
7309 goto failure;
7310 }
7311 sve_regimm:
7312 if (info->addr.pcrel || info->addr.offset.is_reg
7313 || !info->addr.preind || info->addr.writeback)
7314 {
7315 set_syntax_error (_("invalid addressing mode"));
7316 goto failure;
7317 }
7318 if (inst.reloc.type != BFD_RELOC_UNUSED
7319 || inst.reloc.exp.X_op != O_constant)
7320 {
7321 /* Make sure this has priority over
7322 "invalid addressing mode". */
7323 set_fatal_syntax_error (_("constant offset required"));
7324 goto failure;
7325 }
7326 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7327 break;
7328
7329 case AARCH64_OPND_SVE_ADDR_R:
7330 /* [<Xn|SP>{, <R><m>}]
7331 but recognizing SVE registers. */
7332 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7333 &offset_qualifier));
7334 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7335 {
7336 offset_qualifier = AARCH64_OPND_QLF_X;
7337 info->addr.offset.is_reg = 1;
7338 info->addr.offset.regno = 31;
7339 }
7340 else if (base_qualifier != AARCH64_OPND_QLF_X
7341 || offset_qualifier != AARCH64_OPND_QLF_X)
7342 {
7343 set_syntax_error (_("invalid addressing mode"));
7344 goto failure;
7345 }
7346 goto regoff_addr;
7347
7348 case AARCH64_OPND_SVE_ADDR_RR:
7349 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7350 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7351 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7352 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7353 case AARCH64_OPND_SVE_ADDR_RX:
7354 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7355 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7356 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7357 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7358 but recognizing SVE registers. */
7359 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7360 &offset_qualifier));
7361 if (base_qualifier != AARCH64_OPND_QLF_X
7362 || offset_qualifier != AARCH64_OPND_QLF_X)
7363 {
7364 set_syntax_error (_("invalid addressing mode"));
7365 goto failure;
7366 }
7367 goto regoff_addr;
7368
7369 case AARCH64_OPND_SVE_ADDR_RZ:
7370 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7371 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7372 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7373 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7374 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7375 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7376 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7377 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7378 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7379 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7380 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7381 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7382 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7383 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7384 &offset_qualifier));
7385 if (base_qualifier != AARCH64_OPND_QLF_X
7386 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7387 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7388 {
7389 set_syntax_error (_("invalid addressing mode"));
7390 goto failure;
7391 }
7392 info->qualifier = offset_qualifier;
7393 goto regoff_addr;
7394
7395 case AARCH64_OPND_SVE_ADDR_ZX:
7396 /* [Zn.<T>{, <Xm>}]. */
7397 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7398 &offset_qualifier));
7399 /* Things to check:
7400 base_qualifier either S_S or S_D
7401 offset_qualifier must be X
7402 */
7403 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7404 && base_qualifier != AARCH64_OPND_QLF_S_D)
7405 || offset_qualifier != AARCH64_OPND_QLF_X)
7406 {
7407 set_syntax_error (_("invalid addressing mode"));
7408 goto failure;
7409 }
7410 info->qualifier = base_qualifier;
7411 if (!info->addr.offset.is_reg || info->addr.pcrel
7412 || !info->addr.preind || info->addr.writeback
7413 || info->shifter.operator_present != 0)
7414 {
7415 set_syntax_error (_("invalid addressing mode"));
7416 goto failure;
7417 }
7418 info->shifter.kind = AARCH64_MOD_LSL;
7419 break;
7420
7421
7422 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7423 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7424 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7425 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7426 /* [Z<n>.<T>{, #imm}] */
7427 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7428 &offset_qualifier));
7429 if (base_qualifier != AARCH64_OPND_QLF_S_S
7430 && base_qualifier != AARCH64_OPND_QLF_S_D)
7431 {
7432 set_syntax_error (_("invalid addressing mode"));
7433 goto failure;
7434 }
7435 info->qualifier = base_qualifier;
7436 goto sve_regimm;
7437
7438 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7439 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7440 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7441 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7442 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7443
7444 We don't reject:
7445
7446 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7447
7448 here since we get better error messages by leaving it to
7449 the qualifier checking routines. */
7450 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7451 &offset_qualifier));
7452 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7453 && base_qualifier != AARCH64_OPND_QLF_S_D)
7454 || offset_qualifier != base_qualifier)
7455 {
7456 set_syntax_error (_("invalid addressing mode"));
7457 goto failure;
7458 }
7459 info->qualifier = base_qualifier;
7460 goto regoff_addr;
7461
7462 case AARCH64_OPND_SYSREG:
7463 {
7464 uint32_t sysreg_flags;
7465 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7466 &sysreg_flags)) == PARSE_FAIL)
7467 {
7468 set_syntax_error (_("unknown or missing system register name"));
7469 goto failure;
7470 }
7471 inst.base.operands[i].sysreg.value = val;
7472 inst.base.operands[i].sysreg.flags = sysreg_flags;
7473 break;
7474 }
7475
7476 case AARCH64_OPND_PSTATEFIELD:
7477 {
7478 uint32_t sysreg_flags;
7479 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7480 &sysreg_flags)) == PARSE_FAIL)
7481 {
7482 set_syntax_error (_("unknown or missing PSTATE field name"));
7483 goto failure;
7484 }
7485 inst.base.operands[i].pstatefield = val;
7486 inst.base.operands[i].sysreg.flags = sysreg_flags;
7487 break;
7488 }
7489
7490 case AARCH64_OPND_SYSREG_IC:
7491 inst.base.operands[i].sysins_op =
7492 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7493 goto sys_reg_ins;
7494
7495 case AARCH64_OPND_SYSREG_DC:
7496 inst.base.operands[i].sysins_op =
7497 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7498 goto sys_reg_ins;
7499
7500 case AARCH64_OPND_SYSREG_AT:
7501 inst.base.operands[i].sysins_op =
7502 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7503 goto sys_reg_ins;
7504
7505 case AARCH64_OPND_SYSREG_SR:
7506 inst.base.operands[i].sysins_op =
7507 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7508 goto sys_reg_ins;
7509
7510 case AARCH64_OPND_SYSREG_TLBI:
7511 inst.base.operands[i].sysins_op =
7512 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7513 sys_reg_ins:
7514 if (inst.base.operands[i].sysins_op == NULL)
7515 {
7516 set_fatal_syntax_error ( _("unknown or missing operation name"));
7517 goto failure;
7518 }
7519 break;
7520
7521 case AARCH64_OPND_BARRIER:
7522 case AARCH64_OPND_BARRIER_ISB:
7523 val = parse_barrier (&str);
7524 if (val != PARSE_FAIL
7525 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7526 {
7527 /* ISB only accepts options name 'sy'. */
7528 set_syntax_error
7529 (_("the specified option is not accepted in ISB"));
7530 /* Turn off backtrack as this optional operand is present. */
7531 backtrack_pos = 0;
7532 goto failure;
7533 }
7534 if (val != PARSE_FAIL
7535 && operands[i] == AARCH64_OPND_BARRIER)
7536 {
7537 /* Regular barriers accept options CRm (C0-C15).
7538 DSB nXS barrier variant accepts values > 15. */
7539 if (val < 0 || val > 15)
7540 {
7541 set_syntax_error (_("the specified option is not accepted in DSB"));
7542 goto failure;
7543 }
7544 }
7545 /* This is an extension to accept a 0..15 immediate. */
7546 if (val == PARSE_FAIL)
7547 po_imm_or_fail (0, 15);
7548 info->barrier = aarch64_barrier_options + val;
7549 break;
7550
7551 case AARCH64_OPND_BARRIER_DSB_NXS:
7552 val = parse_barrier (&str);
7553 if (val != PARSE_FAIL)
7554 {
7555 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7556 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7557 {
7558 set_syntax_error (_("the specified option is not accepted in DSB"));
7559 /* Turn off backtrack as this optional operand is present. */
7560 backtrack_pos = 0;
7561 goto failure;
7562 }
7563 }
7564 else
7565 {
7566 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7567 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7568 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7569 goto failure;
7570 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7571 {
7572 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7573 goto failure;
7574 }
7575 }
7576 /* Option index is encoded as 2-bit value in val<3:2>. */
7577 val = (val >> 2) - 4;
7578 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7579 break;
7580
7581 case AARCH64_OPND_PRFOP:
7582 val = parse_pldop (&str);
7583 /* This is an extension to accept a 0..31 immediate. */
7584 if (val == PARSE_FAIL)
7585 po_imm_or_fail (0, 31);
7586 inst.base.operands[i].prfop = aarch64_prfops + val;
7587 break;
7588
7589 case AARCH64_OPND_BARRIER_PSB:
7590 val = parse_barrier_psb (&str, &(info->hint_option));
7591 if (val == PARSE_FAIL)
7592 goto failure;
7593 break;
7594
7595 case AARCH64_OPND_BTI_TARGET:
7596 val = parse_bti_operand (&str, &(info->hint_option));
7597 if (val == PARSE_FAIL)
7598 goto failure;
7599 break;
7600
7601 case AARCH64_OPND_SME_ZAda_2b:
7602 case AARCH64_OPND_SME_ZAda_3b:
7603 reg = parse_reg_with_qual (&str, REG_TYPE_ZAT, &qualifier, 0);
7604 if (!reg)
7605 goto failure;
7606 info->reg.regno = reg->number;
7607 info->qualifier = qualifier;
7608 break;
7609
7610 case AARCH64_OPND_SME_ZA_HV_idx_src:
7611 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7612 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7613 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr
7614 ? !parse_sme_za_hv_tiles_operand_with_braces (&str,
7615 &info->indexed_za,
7616 &qualifier)
7617 : !parse_dual_indexed_reg (&str, REG_TYPE_ZATHV,
7618 &info->indexed_za, &qualifier, 0))
7619 goto failure;
7620 info->qualifier = qualifier;
7621 break;
7622
7623 case AARCH64_OPND_SME_list_of_64bit_tiles:
7624 val = parse_sme_list_of_64bit_tiles (&str);
7625 if (val == PARSE_FAIL)
7626 goto failure;
7627 info->imm.value = val;
7628 break;
7629
7630 case AARCH64_OPND_SME_ZA_array:
7631 if (!parse_dual_indexed_reg (&str, REG_TYPE_ZA,
7632 &info->indexed_za, &qualifier, 0))
7633 goto failure;
7634 info->qualifier = qualifier;
7635 break;
7636
7637 case AARCH64_OPND_MOPS_ADDR_Rd:
7638 case AARCH64_OPND_MOPS_ADDR_Rs:
7639 po_char_or_fail ('[');
7640 if (!parse_x0_to_x30 (&str, info))
7641 goto failure;
7642 po_char_or_fail (']');
7643 po_char_or_fail ('!');
7644 break;
7645
7646 case AARCH64_OPND_MOPS_WB_Rn:
7647 if (!parse_x0_to_x30 (&str, info))
7648 goto failure;
7649 po_char_or_fail ('!');
7650 break;
7651
7652 default:
7653 as_fatal (_("unhandled operand code %d"), operands[i]);
7654 }
7655
7656 /* If we get here, this operand was successfully parsed. */
7657 inst.base.operands[i].present = 1;
7658 continue;
7659
7660 failure:
7661 /* The parse routine should already have set the error, but in case
7662 not, set a default one here. */
7663 if (! error_p ())
7664 set_default_error ();
7665
7666 if (! backtrack_pos)
7667 goto parse_operands_return;
7668
7669 {
7670 /* We reach here because this operand is marked as optional, and
7671 either no operand was supplied or the operand was supplied but it
7672 was syntactically incorrect. In the latter case we report an
7673 error. In the former case we perform a few more checks before
7674 dropping through to the code to insert the default operand. */
7675
7676 char *tmp = backtrack_pos;
7677 char endchar = END_OF_INSN;
7678
7679 if (i != (aarch64_num_of_operands (opcode) - 1))
7680 endchar = ',';
7681 skip_past_char (&tmp, ',');
7682
7683 if (*tmp != endchar)
7684 /* The user has supplied an operand in the wrong format. */
7685 goto parse_operands_return;
7686
7687 /* Make sure there is not a comma before the optional operand.
7688 For example the fifth operand of 'sys' is optional:
7689
7690 sys #0,c0,c0,#0, <--- wrong
7691 sys #0,c0,c0,#0 <--- correct. */
7692 if (comma_skipped_p && i && endchar == END_OF_INSN)
7693 {
7694 set_fatal_syntax_error
7695 (_("unexpected comma before the omitted optional operand"));
7696 goto parse_operands_return;
7697 }
7698 }
7699
7700 /* Reaching here means we are dealing with an optional operand that is
7701 omitted from the assembly line. */
7702 gas_assert (optional_operand_p (opcode, i));
7703 info->present = 0;
7704 process_omitted_operand (operands[i], opcode, i, info);
7705
7706 /* Try again, skipping the optional operand at backtrack_pos. */
7707 str = backtrack_pos;
7708 backtrack_pos = 0;
7709
7710 /* Clear any error record after the omitted optional operand has been
7711 successfully handled. */
7712 clear_error ();
7713 }
7714
7715 /* Check if we have parsed all the operands. */
7716 if (*str != '\0' && ! error_p ())
7717 {
7718 /* Set I to the index of the last present operand; this is
7719 for the purpose of diagnostics. */
7720 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7721 ;
7722 set_fatal_syntax_error
7723 (_("unexpected characters following instruction"));
7724 }
7725
7726 parse_operands_return:
7727
7728 if (error_p ())
7729 {
7730 inst.parsing_error.index = i;
7731 DEBUG_TRACE ("parsing FAIL: %s - %s",
7732 operand_mismatch_kind_names[inst.parsing_error.kind],
7733 inst.parsing_error.error);
7734 /* Record the operand error properly; this is useful when there
7735 are multiple instruction templates for a mnemonic name, so that
7736 later on, we can select the error that most closely describes
7737 the problem. */
7738 record_operand_error_info (opcode, &inst.parsing_error);
7739 return false;
7740 }
7741 else
7742 {
7743 DEBUG_TRACE ("parsing SUCCESS");
7744 return true;
7745 }
7746 }
7747
/* Apply programmer-friendly fix-ups to the parsed instruction in INSTR,
   rewriting accepted-but-non-preferred forms into the preferred
   architectural syntax, which is the only syntax libopcodes accepts.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
         Test and Branch Not Zero: conditionally jumps to label if bit number
         uimm6 in register Xn is not zero.  The bit number implies the width of
         the register, which may be written and should be disassembled as Wn if
         uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register restricts the bit number to 0..31; reject anything
	     larger before silently widening the register to Xn.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  /* The instruction encoding only has an X-register form; the W
	     spelling is a convenience alias.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  if (op == OP_LDRSW_LIT)
	    /* LDRSW loads a 32-bit literal regardless of the Xt
	       destination qualifier.  */
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
7855
/* Check for loads and stores that will cause unpredictable behavior
   (e.g. writeback to a transfer register, or identical transfer
   registers in a load pair) and emit as_warn diagnostics for them.
   INSTR is the fully parsed instruction; STR is the original assembly
   text used in the warning messages.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 For pairs the address operand is operand 2.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 Bit 22 set distinguishes the load form in these pair encodings.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
          if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.
		 Bit 21 set selects the pair form.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
	            /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7951
7952 static void
7953 force_automatic_sequence_close (void)
7954 {
7955 struct aarch64_segment_info_type *tc_seg_info;
7956
7957 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7958 if (tc_seg_info->insn_sequence.instr)
7959 {
7960 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7961 _("previous `%s' sequence has not been closed"),
7962 tc_seg_info->insn_sequence.instr->opcode->name);
7963 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7964 }
7965 }
7966
7967 /* A wrapper function to interface with libopcodes on encoding and
7968 record the error message if there is any.
7969
7970 Return TRUE on success; otherwise return FALSE. */
7971
7972 static bool
7973 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7974 aarch64_insn *code)
7975 {
7976 aarch64_operand_error error_info;
7977 memset (&error_info, '\0', sizeof (error_info));
7978 error_info.kind = AARCH64_OPDE_NIL;
7979 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7980 && !error_info.non_fatal)
7981 return true;
7982
7983 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7984 record_operand_error_info (opcode, &error_info);
7985 return error_info.non_fatal;
7986 }
7987
7988 #ifdef DEBUG_AARCH64
7989 static inline void
7990 dump_opcode_operands (const aarch64_opcode *opcode)
7991 {
7992 int i = 0;
7993 while (opcode->operands[i] != AARCH64_OPND_NIL)
7994 {
7995 aarch64_verbose ("\t\t opnd%d: %s", i,
7996 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7997 ? aarch64_get_operand_name (opcode->operands[i])
7998 : aarch64_get_operand_desc (opcode->operands[i]));
7999 ++i;
8000 }
8001 }
8002 #endif /* DEBUG_AARCH64 */
8003
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  The first '.' (if any) marks a condition
     or qualifier suffix.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* A line of the form "foo .req x0" defines a register alias rather
     than an instruction.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* opcode_lookup may have recorded the condition suffix in inst.cond;
     preserve it across the per-template reset of INST.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; on full success, emit the instruction
	 and stop looking at further templates.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!aarch64_cpu_supports_inst_p (cpu_variant, inst_base))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; reset state before trying the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8158
/* Various frobbings of labels and their addresses.  */

/* Called at the start of every input line; forget any label seen on the
   previous line so md_assemble does not re-align it.  */
void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
8166
/* Called whenever a label SYM is defined.  Remember it so the next
   instruction can re-anchor it, and emit it to the DWARF2 line info.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8174
/* Called when changing away from a section; close any constrained
   instruction sequence left open in the current segment.  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
8181
8182 int
8183 aarch64_data_in_code (void)
8184 {
8185 if (startswith (input_line_pointer + 1, "data:"))
8186 {
8187 *input_line_pointer = '/';
8188 input_line_pointer += 5;
8189 *input_line_pointer = 0;
8190 return 1;
8191 }
8192
8193 return 0;
8194 }
8195
/* Canonicalize NAME by stripping a trailing "/data" marker, if any.
   The string is modified in place; NAME itself is returned.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  /* Use size_t: strlen returns size_t, and storing it in an int could
     truncate for pathologically long names.  */
  size_t len = strlen (name);

  if (len > 5 && streq (name + len - 5, "/data"))
    *(name + len - 5) = 0;

  return name;
}
8206 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* The final initializer flags whether the name is the canonical one for
   the register (true) or merely an alias (false) -- presumably used when
   choosing the name to print back; confirm against struct reg_entry.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* REGNUMS appends suffix S after the register number, e.g. za0h.  */
#define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET16S(p,s,t) \
  REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
  REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
  REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
  REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  x31/w31 are deliberately absent: register 31 is
     only addressable via the sp/wsp and xzr/wzr names below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  */
  REGSET16 (p, PN), REGSET16 (P, PN),

  /* SME ZA.  We model this as a register because it acts syntactically
     like ZA0H, supporting qualifier suffixes and indexing.  */
  REGDEF (za, 0, ZA), REGDEF (ZA, 0, ZA),

  /* SME ZA tile registers.  */
  REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),

  /* SME ZA tile registers (horizontal slice).  */
  REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),

  /* SME ZA tile registers (vertical slice).  */
  REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
8295
/* Table of NZCV condition-flag operand names for MSR/CCMP-style
   operands.  Each of the 16 entries spells out which of the N, Z, C and
   V flags are set: an uppercase letter in the name means the
   corresponding bit is 1.  B packs the four flags into a 4-bit value
   with N in bit 3 down to V in bit 0.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8333 \f
8334 /* MD interface: bits in the object file. */
8335
8336 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8337 for use in the a.out file, and stores them in the array pointed to by buf.
8338 This knows about the endian-ness of the target machine and does
8339 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8340 2 (short) and 4 (long) Floating numbers are put out as a series of
8341 LITTLENUMS (shorts, here at least). */
8342
8343 void
8344 md_number_to_chars (char *buf, valueT val, int n)
8345 {
8346 if (target_big_endian)
8347 number_to_chars_bigendian (buf, val, n);
8348 else
8349 number_to_chars_littleendian (buf, val, n);
8350 }
8351
8352 /* MD interface: Sections. */
8353
8354 /* Estimate the size of a frag before relaxing. Assume everything fits in
8355 4 bytes. */
8356
8357 int
8358 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
8359 {
8360 fragp->fr_var = 4;
8361 return 4;
8362 }
8363
/* Round up a section size to the appropriate boundary.  AArch64 needs
   no extra padding, so the size is returned unchanged.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  return size;
}
8371
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed, and the write position inside the frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* FIX is the number of bytes needed to reach a 4-byte boundary; these
     cannot hold an instruction, so zero-fill them and mark them as data
     via a mapping symbol.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating part of the frag is one NOP instruction.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8429
8430 /* Perform target specific initialisation of a frag.
8431 Note - despite the name this initialisation is not done when the frag
8432 is created, but only when its type is assigned. A frag can be created
8433 and used a long time before its type is set, so beware of assuming that
8434 this initialisation is performed first. */
8435
8436 #ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag data, so frag initialisation is
   deliberately a no-op.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}
8442
8443 #else /* OBJ_ELF is defined. */
8444 void
8445 aarch64_init_frag (fragS * fragP, int max_chars)
8446 {
8447 /* Record a mapping symbol for alignment frags. We will delete this
8448 later if the alignment ends up empty. */
8449 if (!fragP->tc_frag_data.recorded)
8450 fragP->tc_frag_data.recorded = 1;
8451
8452 /* PR 21809: Do not set a mapping state for debug sections
8453 - it just confuses other tools. */
8454 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
8455 return;
8456
8457 switch (fragP->fr_type)
8458 {
8459 case rs_align_test:
8460 case rs_fill:
8461 mapping_state_2 (MAP_DATA, max_chars);
8462 break;
8463 case rs_align:
8464 /* PR 20364: We can get alignment frags in code sections,
8465 so do not just assume that we should use the MAP_DATA state. */
8466 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
8467 break;
8468 case rs_align_code:
8469 mapping_state_2 (MAP_INSN, max_chars);
8470 break;
8471 default:
8472 break;
8473 }
8474 }
8475
8476 /* Whether SFrame stack trace info is supported. */
8477
8478 bool
8479 aarch64_support_sframe_p (void)
8480 {
8481 /* At this time, SFrame is supported for aarch64 only. */
8482 return (aarch64_abi == AARCH64_ABI_LP64);
8483 }
8484
/* Specify if RA tracking is needed.  Always true for AArch64: the
   return address lives in a register (LR) rather than at a fixed CFA
   offset.  */

bool
aarch64_sframe_ra_tracking_p (void)
{
  return true;
}
8492
/* Specify the fixed offset to recover RA from CFA.
   (useful only when RA tracking is not needed).  Since RA tracking is
   always on for AArch64, return the "invalid" sentinel.  */

offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8501
8502 /* Get the abi/arch indentifier for SFrame. */
8503
8504 unsigned char
8505 aarch64_sframe_get_abi_arch (void)
8506 {
8507 unsigned char sframe_abi_arch = 0;
8508
8509 if (aarch64_support_sframe_p ())
8510 {
8511 sframe_abi_arch = target_big_endian
8512 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8513 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8514 }
8515
8516 return sframe_abi_arch;
8517 }
8518
8519 #endif /* OBJ_ELF */
8520 \f
/* Initialize the DWARF-2 unwind information for this procedure:
   the CFA starts out as SP + 0.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8528
8529 /* Convert REGNAME to a DWARF-2 register number. */
8530
8531 int
8532 tc_aarch64_regname_to_dw2regnum (char *regname)
8533 {
8534 const reg_entry *reg = parse_reg (&regname);
8535 if (reg == NULL)
8536 return -1;
8537
8538 switch (reg->type)
8539 {
8540 case REG_TYPE_SP_32:
8541 case REG_TYPE_SP_64:
8542 case REG_TYPE_R_32:
8543 case REG_TYPE_R_64:
8544 return reg->number;
8545
8546 case REG_TYPE_FP_B:
8547 case REG_TYPE_FP_H:
8548 case REG_TYPE_FP_S:
8549 case REG_TYPE_FP_D:
8550 case REG_TYPE_FP_Q:
8551 return reg->number + 64;
8552
8553 default:
8554 break;
8555 }
8556 return -1;
8557 }
8558
8559 /* Implement DWARF2_ADDR_SIZE. */
8560
8561 int
8562 aarch64_dwarf2_addr_size (void)
8563 {
8564 if (ilp32_p)
8565 return 4;
8566 else if (llp64_p)
8567 return 8;
8568 return bfd_arch_bits_per_address (stdoutput) / 8;
8569 }
8570
8571 /* MD interface: Symbol and relocation handling. */
8572
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For AArch64 PC-relative fixups applied to instructions
   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     A relocation will be emitted when the symbol is in another segment
     or when the relocation is forced.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
8593
8594 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
8595 Otherwise we have no need to default values of symbols. */
8596
8597 symbolS *
8598 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
8599 {
8600 #ifdef OBJ_ELF
8601 if (name[0] == '_' && name[1] == 'G'
8602 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
8603 {
8604 if (!GOT_symbol)
8605 {
8606 if (symbol_find (name))
8607 as_bad (_("GOT already in the symbol table"));
8608
8609 GOT_symbol = symbol_new (name, undefined_section,
8610 &zero_address_frag, 0);
8611 }
8612
8613 return GOT_symbol;
8614 }
8615 #endif
8616
8617 return 0;
8618 }
8619
8620 /* Return non-zero if the indicated VALUE has overflowed the maximum
8621 range expressible by a unsigned number with the indicated number of
8622 BITS. */
8623
8624 static bool
8625 unsigned_overflow (valueT value, unsigned bits)
8626 {
8627 valueT lim;
8628 if (bits >= sizeof (valueT) * 8)
8629 return false;
8630 lim = (valueT) 1 << bits;
8631 return (value >= lim);
8632 }
8633
8634
8635 /* Return non-zero if the indicated VALUE has overflowed the maximum
8636 range expressible by an signed number with the indicated number of
8637 BITS. */
8638
8639 static bool
8640 signed_overflow (offsetT value, unsigned bits)
8641 {
8642 offsetT lim;
8643 if (bits >= sizeof (offsetT) * 8)
8644 return false;
8645 lim = (offsetT) 1 << (bits - 1);
8646 return (value < -lim || value >= lim);
8647 }
8648
8649 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
8650 unsigned immediate offset load/store instruction, try to encode it as
8651 an unscaled, 9-bit, signed immediate offset load/store instruction.
8652 Return TRUE if it is successful; otherwise return FALSE.
8653
8654 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
8655 in response to the standard LDR/STR mnemonics when the immediate offset is
8656 unambiguous, i.e. when it is negative or unaligned. */
8657
static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled (_POS) load/store opcode to its unscaled (LDUR/STUR)
     counterpart; anything without a counterpart cannot be converted.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  /* Switch *INSTR over to the unscaled opcode before re-encoding.  */
  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the new opcode; failure means the offset does not
     fit the 9-bit signed form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8711
8712 /* Called by fix_insn to fix a MOV immediate alias instruction.
8713
8714 Operand for a generic move immediate instruction, which is an alias
8715 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
8716 a 32-bit/64-bit immediate value into general register. An assembler error
8717 shall result if the immediate cannot be created by a single one of these
8718 instructions. If there is a choice, then to ensure reversability an
8719 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
8720
static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  MOVZ/MOVN cannot target SP;
     ORR-immediate cannot target ZR.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN is the wide-negated form;
	 the alternatives here are MOVZ, MOVN and ORR, per the function
	 comment above).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* None of the single-instruction encodings accepted VALUE.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
8772
8773 /* An instruction operand which is immediate related may have symbol used
8774 in the assembly, e.g.
8775
8776 mov w0, u32
8777 .set u32, 0x00ffff00
8778
8779 At the time when the assembly instruction is parsed, a referenced symbol,
8780 like 'u32' in the above example may not have been seen; a fixS is created
8781 in such a case and is handled here after symbols have been resolved.
8782 Instruction is fixed up with VALUE using the information in *FIXP plus
8783 extra information in FLAGS.
8784
8785 This function is called by md_apply_fix to fix up instructions that need
8786 a fix-up described above but does not involve any linker-time relocation. */
8787
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  /* Dispatch on the kind of operand the now-resolved VALUE fills.  */
  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate for SVC/HVC/... or UDF.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
	 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
	 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
	 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12 Rn Rd ADD
	 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
	 51000000 sf 101|0001|shift imm12 Rn Rd SUB
	 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only possible
	     when the low 12 bits are all zero and the shifted value
	     still fits in imm12.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction with the
	 resolved value rather than patching bits by hand.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  If the scaled unsigned form fails, fall back to
	 the programmer-friendly unscaled LDUR/STUR conversion.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8951
8952 /* Apply a fixup (fixP) to segment data, once it has been determined
8953 by our caller that we have all the info we need to fix it up.
8954
8955 Parameter valP is the pointer to the value of the bits. */
8956
void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  A fixup with no
     symbol that is not pc-relative and is not forced to stay a
     relocation can be fully resolved here.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    /* Plain data relocations: write the resolved value directly when
       the fixup is done or the target uses REL-style in-place addends.  */
    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVW group: each case sets SCALE (which 16-bit slice of the value
       the instruction materialises) and falls into movw_common.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale. */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn. */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Narrow the generic reloc to the 32- or 64-bit ABI variant.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Narrow the generic reloc to the 32- or 64-bit ABI variant.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* All remaining TLS relocations: mark the symbol thread-local and
       leave the relocation for the linker.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    case BFD_RELOC_RVA:
    case BFD_RELOC_32_SECREL:
    case BFD_RELOC_16_SECIDX:
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated the struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually use
     this field, so the impact on the performance should be minimal .  */
  free (fixP->tc_fix_data.inst);

  return;
}
9344
9345 /* Translate internal representation of relocation info to BFD target
9346 format. */
9347
9348 arelent *
9349 tc_gen_reloc (asection * section, fixS * fixp)
9350 {
9351 arelent *reloc;
9352 bfd_reloc_code_real_type code;
9353
9354 reloc = XNEW (arelent);
9355
9356 reloc->sym_ptr_ptr = XNEW (asymbol *);
9357 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9358 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9359
9360 if (fixp->fx_pcrel)
9361 {
9362 if (section->use_rela_p)
9363 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9364 else
9365 fixp->fx_offset = reloc->address;
9366 }
9367 reloc->addend = fixp->fx_offset;
9368
9369 code = fixp->fx_r_type;
9370 switch (code)
9371 {
9372 case BFD_RELOC_16:
9373 if (fixp->fx_pcrel)
9374 code = BFD_RELOC_16_PCREL;
9375 break;
9376
9377 case BFD_RELOC_32:
9378 if (fixp->fx_pcrel)
9379 code = BFD_RELOC_32_PCREL;
9380 break;
9381
9382 case BFD_RELOC_64:
9383 if (fixp->fx_pcrel)
9384 code = BFD_RELOC_64_PCREL;
9385 break;
9386
9387 default:
9388 break;
9389 }
9390
9391 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9392 if (reloc->howto == NULL)
9393 {
9394 as_bad_where (fixp->fx_file, fixp->fx_line,
9395 _
9396 ("cannot represent %s relocation in this object file format"),
9397 bfd_get_reloc_code_name (code));
9398 return NULL;
9399 }
9400
9401 return reloc;
9402 }
9403
9404 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9405
void
cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
{
  bfd_reloc_code_real_type type;
  /* Data directives never generate pc-relative fixups here.  */
  int pcrel = 0;

#ifdef TE_PE
  /* PE-specific operators: rewrite them to plain symbol references with
     the matching PE relocation type.  */
  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_32_SECREL;
    }
  else if (exp->X_op == O_secidx)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_16_SECIDX;
    }
  else
    {
#endif
  /* Pick a reloc.
     FIXME: @@ Should look at CPU word size.  */
  switch (size)
    {
    case 1:
      type = BFD_RELOC_8;
      break;
    case 2:
      type = BFD_RELOC_16;
      break;
    case 4:
      type = BFD_RELOC_32;
      break;
    case 8:
      type = BFD_RELOC_64;
      break;
    default:
      as_bad (_("cannot do %u-byte relocation"), size);
      type = BFD_RELOC_UNUSED;
      break;
    }
#ifdef TE_PE
    }
#endif

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}
9453
9454 /* Implement md_after_parse_args. This is the earliest time we need to decide
9455 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9456
9457 void
9458 aarch64_after_parse_args (void)
9459 {
9460 if (aarch64_abi != AARCH64_ABI_NONE)
9461 return;
9462
9463 #ifdef OBJ_ELF
9464 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9465 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9466 aarch64_abi = AARCH64_ABI_ILP32;
9467 else
9468 aarch64_abi = AARCH64_ABI_LP64;
9469 #else
9470 aarch64_abi = AARCH64_ABI_LLP64;
9471 #endif
9472 }
9473
9474 #ifdef OBJ_ELF
9475 const char *
9476 elf64_aarch64_target_format (void)
9477 {
9478 #ifdef TE_CLOUDABI
9479 /* FIXME: What to do for ilp32_p ? */
9480 if (target_big_endian)
9481 return "elf64-bigaarch64-cloudabi";
9482 else
9483 return "elf64-littleaarch64-cloudabi";
9484 #else
9485 if (target_big_endian)
9486 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9487 else
9488 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9489 #endif
9490 }
9491
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  /* No AArch64-specific symbol tweaks are needed; defer entirely to the
     generic ELF frobbing.  */
  elf_frob_symbol (symp, puntp);
}
9497 #elif defined OBJ_COFF
const char *
coff_aarch64_target_format (void)
{
  /* Only little-endian PE output is provided for COFF targets.  */
  return "pe-aarch64-little";
}
9503 #endif
9504
9505 /* MD interface: Finalization. */
9506
9507 /* A good place to do this, although this was probably not intended
9508 for this kind of use. We need to dump the literal pool before
9509 references are made to a null symbol pointer. */
9510
9511 void
9512 aarch64_cleanup (void)
9513 {
9514 literal_pool *pool;
9515
9516 for (pool = list_of_pools; pool; pool = pool->next)
9517 {
9518 /* Put it at the end of the relevant section. */
9519 subseg_set (pool->section, pool->sub_section);
9520 s_ltorg (0);
9521 }
9522 }
9523
9524 #ifdef OBJ_ELF
9525 /* Remove any excess mapping symbols generated for alignment frags in
9526 SEC. We may have created a mapping symbol before a zero byte
9527 alignment; remove it if there's a mapping symbol after the
9528 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to scan for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag, looking at the last mapping symbol recorded in
     each; drop it when a later mapping symbol makes it redundant.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9588 #endif
9589
9590 /* Adjust the symbol table. */
9591
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
  /* Non-ELF targets need no symbol table adjustments.  */
#endif
}
9602
/* Insert KEY/VALUE into TABLE.  Wrapper around str_hash_insert with
   the REPLACE argument fixed to 0.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9608
/* Insert the system-register name KEY with VALUE into TABLE, after
   asserting that KEY fits within AARCH64_MAX_SYSREG_NAME_LEN.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9615
9616 static void
9617 fill_instruction_hash_table (void)
9618 {
9619 const aarch64_opcode *opcode = aarch64_opcode_table;
9620
9621 while (opcode->name != NULL)
9622 {
9623 templates *templ, *new_templ;
9624 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9625
9626 new_templ = XNEW (templates);
9627 new_templ->opcode = opcode;
9628 new_templ->next = NULL;
9629
9630 if (!templ)
9631 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9632 else
9633 {
9634 new_templ->next = templ->next;
9635 templ->next = new_templ;
9636 }
9637 ++opcode;
9638 }
9639 }
9640
9641 static inline void
9642 convert_to_upper (char *dst, const char *src, size_t num)
9643 {
9644 unsigned int i;
9645 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9646 *dst = TOUPPER (*src);
9647 *dst = '\0';
9648 }
9649
9650 /* Assume STR point to a lower-case string, allocate, convert and return
9651 the corresponding upper-case string. */
9652 static inline const char*
9653 get_upper_str (const char *str)
9654 {
9655 char *ret;
9656 size_t len = strlen (str);
9657 ret = XNEWVEC (char, len + 1);
9658 convert_to_upper (ret, str, len);
9659 return ret;
9660 }
9661
9662 /* MD interface: Initialization. */
9663
9664 void
9665 md_begin (void)
9666 {
9667 unsigned mach;
9668 unsigned int i;
9669
9670 aarch64_ops_hsh = str_htab_create ();
9671 aarch64_cond_hsh = str_htab_create ();
9672 aarch64_shift_hsh = str_htab_create ();
9673 aarch64_sys_regs_hsh = str_htab_create ();
9674 aarch64_pstatefield_hsh = str_htab_create ();
9675 aarch64_sys_regs_ic_hsh = str_htab_create ();
9676 aarch64_sys_regs_dc_hsh = str_htab_create ();
9677 aarch64_sys_regs_at_hsh = str_htab_create ();
9678 aarch64_sys_regs_tlbi_hsh = str_htab_create ();
9679 aarch64_sys_regs_sr_hsh = str_htab_create ();
9680 aarch64_reg_hsh = str_htab_create ();
9681 aarch64_barrier_opt_hsh = str_htab_create ();
9682 aarch64_nzcv_hsh = str_htab_create ();
9683 aarch64_pldop_hsh = str_htab_create ();
9684 aarch64_hint_opt_hsh = str_htab_create ();
9685
9686 fill_instruction_hash_table ();
9687
9688 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
9689 sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
9690 (void *) (aarch64_sys_regs + i));
9691
9692 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
9693 sysreg_hash_insert (aarch64_pstatefield_hsh,
9694 aarch64_pstatefields[i].name,
9695 (void *) (aarch64_pstatefields + i));
9696
9697 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
9698 sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
9699 aarch64_sys_regs_ic[i].name,
9700 (void *) (aarch64_sys_regs_ic + i));
9701
9702 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
9703 sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
9704 aarch64_sys_regs_dc[i].name,
9705 (void *) (aarch64_sys_regs_dc + i));
9706
9707 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
9708 sysreg_hash_insert (aarch64_sys_regs_at_hsh,
9709 aarch64_sys_regs_at[i].name,
9710 (void *) (aarch64_sys_regs_at + i));
9711
9712 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
9713 sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
9714 aarch64_sys_regs_tlbi[i].name,
9715 (void *) (aarch64_sys_regs_tlbi + i));
9716
9717 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
9718 sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
9719 aarch64_sys_regs_sr[i].name,
9720 (void *) (aarch64_sys_regs_sr + i));
9721
9722 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
9723 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
9724 (void *) (reg_names + i));
9725
9726 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
9727 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
9728 (void *) (nzcv_names + i));
9729
9730 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
9731 {
9732 const char *name = aarch64_operand_modifiers[i].name;
9733 checked_hash_insert (aarch64_shift_hsh, name,
9734 (void *) (aarch64_operand_modifiers + i));
9735 /* Also hash the name in the upper case. */
9736 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
9737 (void *) (aarch64_operand_modifiers + i));
9738 }
9739
9740 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
9741 {
9742 unsigned int j;
9743 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
9744 the same condition code. */
9745 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
9746 {
9747 const char *name = aarch64_conds[i].names[j];
9748 if (name == NULL)
9749 break;
9750 checked_hash_insert (aarch64_cond_hsh, name,
9751 (void *) (aarch64_conds + i));
9752 /* Also hash the name in the upper case. */
9753 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
9754 (void *) (aarch64_conds + i));
9755 }
9756 }
9757
9758 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
9759 {
9760 const char *name = aarch64_barrier_options[i].name;
9761 /* Skip xx00 - the unallocated values of option. */
9762 if ((i & 0x3) == 0)
9763 continue;
9764 checked_hash_insert (aarch64_barrier_opt_hsh, name,
9765 (void *) (aarch64_barrier_options + i));
9766 /* Also hash the name in the upper case. */
9767 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
9768 (void *) (aarch64_barrier_options + i));
9769 }
9770
9771 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
9772 {
9773 const char *name = aarch64_barrier_dsb_nxs_options[i].name;
9774 checked_hash_insert (aarch64_barrier_opt_hsh, name,
9775 (void *) (aarch64_barrier_dsb_nxs_options + i));
9776 /* Also hash the name in the upper case. */
9777 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
9778 (void *) (aarch64_barrier_dsb_nxs_options + i));
9779 }
9780
9781 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
9782 {
9783 const char* name = aarch64_prfops[i].name;
9784 /* Skip the unallocated hint encodings. */
9785 if (name == NULL)
9786 continue;
9787 checked_hash_insert (aarch64_pldop_hsh, name,
9788 (void *) (aarch64_prfops + i));
9789 /* Also hash the name in the upper case. */
9790 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
9791 (void *) (aarch64_prfops + i));
9792 }
9793
9794 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
9795 {
9796 const char* name = aarch64_hint_options[i].name;
9797 const char* upper_name = get_upper_str(name);
9798
9799 checked_hash_insert (aarch64_hint_opt_hsh, name,
9800 (void *) (aarch64_hint_options + i));
9801
9802 /* Also hash the name in the upper case if not the same. */
9803 if (strcmp (name, upper_name) != 0)
9804 checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
9805 (void *) (aarch64_hint_options + i));
9806 }
9807
9808 /* Set the cpu variant based on the command-line options. */
9809 if (!mcpu_cpu_opt)
9810 mcpu_cpu_opt = march_cpu_opt;
9811
9812 if (!mcpu_cpu_opt)
9813 mcpu_cpu_opt = &cpu_default;
9814
9815 cpu_variant = *mcpu_cpu_opt;
9816
9817 /* Record the CPU type. */
9818 if(ilp32_p)
9819 mach = bfd_mach_aarch64_ilp32;
9820 else if (llp64_p)
9821 mach = bfd_mach_aarch64_llp64;
9822 else
9823 mach = bfd_mach_aarch64;
9824
9825 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
9826 #ifdef OBJ_ELF
9827 /* FIXME - is there a better way to do it ? */
9828 aarch64_sframe_cfa_sp_reg = 31;
9829 aarch64_sframe_cfa_fp_reg = 29; /* x29. */
9830 aarch64_sframe_cfa_ra_reg = 30;
9831 #endif
9832 }
9833
/* Command line processing.  */

/* Short options: 'm' takes an argument (-mabi=, -mcpu=, -march=, ...,
   dispatched in md_parse_option).  */
const char *md_shortopts = "m:";

/* Define OPTION_EB/OPTION_EL only for the endiannesses this target
   configuration can produce.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* Long options; -EB/-EL are accepted only when the corresponding
   macro is defined above.  */
struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9860
/* A simple command-line option that sets an integer flag when
   matched.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Flag-style options handled generically by md_parse_option.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9883
/* Entry describing one CPU accepted by -mcpu= and .cpu: its feature
   set and display name.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-X2"},
  /* Fallback used when no -mcpu/-march is given.  */
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
10052
/* Entry describing one architecture accepted by -march= and .arch.  */
struct aarch64_arch_option_table
{
  const char *name;		/* Architecture name, e.g. "armv8.4-a".  */
  const aarch64_feature_set value;	/* Features implied by that name.  */
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
10079
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* Extensions accepted after '+' in -mcpu=/-march= and by
   .arch_extension.  REQUIRE lists the features an extension depends
   on; the transitive closure is computed by aarch64_feature_enable_set
   and aarch64_feature_disable_set.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-f64f64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i16i64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {"mops",		AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
			AARCH64_ARCH_NONE},
  {"hbc",		AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
			AARCH64_ARCH_NONE},
  {"cssc",		AARCH64_FEATURE (AARCH64_FEATURE_CSSC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
10188
/* A command-line option that carries an argument and is decoded by a
   dedicated parser function (see aarch64_long_opts below).  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
10196
10197 /* Transitive closure of features depending on set. */
10198 static aarch64_feature_set
10199 aarch64_feature_disable_set (aarch64_feature_set set)
10200 {
10201 const struct aarch64_option_cpu_value_table *opt;
10202 aarch64_feature_set prev = 0;
10203
10204 while (prev != set) {
10205 prev = set;
10206 for (opt = aarch64_features; opt->name != NULL; opt++)
10207 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10208 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10209 }
10210 return set;
10211 }
10212
10213 /* Transitive closure of dependencies of set. */
10214 static aarch64_feature_set
10215 aarch64_feature_enable_set (aarch64_feature_set set)
10216 {
10217 const struct aarch64_option_cpu_value_table *opt;
10218 aarch64_feature_set prev = 0;
10219
10220 while (prev != set) {
10221 prev = set;
10222 for (opt = aarch64_features; opt->name != NULL; opt++)
10223 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10224 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10225 }
10226 return set;
10227 }
10228
10229 static int
10230 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10231 bool ext_only)
10232 {
10233 /* We insist on extensions being added before being removed. We achieve
10234 this by using the ADDING_VALUE variable to indicate whether we are
10235 adding an extension (1) or removing it (0) and only allowing it to
10236 change in the order -1 -> 1 -> 0. */
10237 int adding_value = -1;
10238 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10239
10240 /* Copy the feature set, so that we can modify it. */
10241 *ext_set = **opt_p;
10242 *opt_p = ext_set;
10243
10244 while (str != NULL && *str != 0)
10245 {
10246 const struct aarch64_option_cpu_value_table *opt;
10247 const char *ext = NULL;
10248 int optlen;
10249
10250 if (!ext_only)
10251 {
10252 if (*str != '+')
10253 {
10254 as_bad (_("invalid architectural extension"));
10255 return 0;
10256 }
10257
10258 ext = strchr (++str, '+');
10259 }
10260
10261 if (ext != NULL)
10262 optlen = ext - str;
10263 else
10264 optlen = strlen (str);
10265
10266 if (optlen >= 2 && startswith (str, "no"))
10267 {
10268 if (adding_value != 0)
10269 adding_value = 0;
10270 optlen -= 2;
10271 str += 2;
10272 }
10273 else if (optlen > 0)
10274 {
10275 if (adding_value == -1)
10276 adding_value = 1;
10277 else if (adding_value != 1)
10278 {
10279 as_bad (_("must specify extensions to add before specifying "
10280 "those to remove"));
10281 return false;
10282 }
10283 }
10284
10285 if (optlen == 0)
10286 {
10287 as_bad (_("missing architectural extension"));
10288 return 0;
10289 }
10290
10291 gas_assert (adding_value != -1);
10292
10293 for (opt = aarch64_features; opt->name != NULL; opt++)
10294 if (strncmp (opt->name, str, optlen) == 0)
10295 {
10296 aarch64_feature_set set;
10297
10298 /* Add or remove the extension. */
10299 if (adding_value)
10300 {
10301 set = aarch64_feature_enable_set (opt->value);
10302 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10303 }
10304 else
10305 {
10306 set = aarch64_feature_disable_set (opt->value);
10307 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10308 }
10309 break;
10310 }
10311
10312 if (opt->name == NULL)
10313 {
10314 as_bad (_("unknown architectural extension `%s'"), str);
10315 return 0;
10316 }
10317
10318 str = ext;
10319 };
10320
10321 return 1;
10322 }
10323
10324 static int
10325 aarch64_parse_cpu (const char *str)
10326 {
10327 const struct aarch64_cpu_option_table *opt;
10328 const char *ext = strchr (str, '+');
10329 size_t optlen;
10330
10331 if (ext != NULL)
10332 optlen = ext - str;
10333 else
10334 optlen = strlen (str);
10335
10336 if (optlen == 0)
10337 {
10338 as_bad (_("missing cpu name `%s'"), str);
10339 return 0;
10340 }
10341
10342 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10343 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10344 {
10345 mcpu_cpu_opt = &opt->value;
10346 if (ext != NULL)
10347 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10348
10349 return 1;
10350 }
10351
10352 as_bad (_("unknown cpu `%s'"), str);
10353 return 0;
10354 }
10355
10356 static int
10357 aarch64_parse_arch (const char *str)
10358 {
10359 const struct aarch64_arch_option_table *opt;
10360 const char *ext = strchr (str, '+');
10361 size_t optlen;
10362
10363 if (ext != NULL)
10364 optlen = ext - str;
10365 else
10366 optlen = strlen (str);
10367
10368 if (optlen == 0)
10369 {
10370 as_bad (_("missing architecture name `%s'"), str);
10371 return 0;
10372 }
10373
10374 for (opt = aarch64_archs; opt->name != NULL; opt++)
10375 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10376 {
10377 march_cpu_opt = &opt->value;
10378 if (ext != NULL)
10379 return aarch64_parse_features (ext, &march_cpu_opt, false);
10380
10381 return 1;
10382 }
10383
10384 as_bad (_("unknown architecture `%s'\n"), str);
10385 return 0;
10386 }
10387
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name as given to -mabi=.  */
  enum aarch64_abi_type value;	/* Corresponding internal ABI code.  */
};

/* ABIs accepted by -mabi=: ilp32/lp64 for ELF targets, llp64
   otherwise.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10403
10404 static int
10405 aarch64_parse_abi (const char *str)
10406 {
10407 unsigned int i;
10408
10409 if (str[0] == '\0')
10410 {
10411 as_bad (_("missing abi name `%s'"), str);
10412 return 0;
10413 }
10414
10415 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10416 if (strcmp (str, aarch64_abis[i].name) == 0)
10417 {
10418 aarch64_abi = aarch64_abis[i].value;
10419 return 1;
10420 }
10421
10422 as_bad (_("unknown abi `%s'\n"), str);
10423 return 0;
10424 }
10425
/* Options taking an argument; OPTION (including the '=') is matched
   as a prefix in md_parse_option and the remainder is handed to
   FUNC.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10435
/* Handle the command-line option C with argument ARG.  Returns 1 if
   the option was recognized and consumed, 0 otherwise.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple flag options.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the options that carry a sub-option argument.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10502
/* Print the AArch64-specific assembler options to FP.  */
void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  /* Simple flag options.  */
  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  /* Options taking an argument.  */
  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
 -EB assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
 -EL assemble code for a little-endian cpu\n"));
#endif
}
10529
10530 /* Parse a .cpu directive. */
10531
10532 static void
10533 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10534 {
10535 const struct aarch64_cpu_option_table *opt;
10536 char saved_char;
10537 char *name;
10538 char *ext;
10539 size_t optlen;
10540
10541 name = input_line_pointer;
10542 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10543 saved_char = *input_line_pointer;
10544 *input_line_pointer = 0;
10545
10546 ext = strchr (name, '+');
10547
10548 if (ext != NULL)
10549 optlen = ext - name;
10550 else
10551 optlen = strlen (name);
10552
10553 /* Skip the first "all" entry. */
10554 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10555 if (strlen (opt->name) == optlen
10556 && strncmp (name, opt->name, optlen) == 0)
10557 {
10558 mcpu_cpu_opt = &opt->value;
10559 if (ext != NULL)
10560 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10561 return;
10562
10563 cpu_variant = *mcpu_cpu_opt;
10564
10565 *input_line_pointer = saved_char;
10566 demand_empty_rest_of_line ();
10567 return;
10568 }
10569 as_bad (_("unknown cpu `%s'"), name);
10570 *input_line_pointer = saved_char;
10571 ignore_rest_of_line ();
10572 }
10573
10574
10575 /* Parse a .arch directive. */
10576
10577 static void
10578 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10579 {
10580 const struct aarch64_arch_option_table *opt;
10581 char saved_char;
10582 char *name;
10583 char *ext;
10584 size_t optlen;
10585
10586 name = input_line_pointer;
10587 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10588 saved_char = *input_line_pointer;
10589 *input_line_pointer = 0;
10590
10591 ext = strchr (name, '+');
10592
10593 if (ext != NULL)
10594 optlen = ext - name;
10595 else
10596 optlen = strlen (name);
10597
10598 /* Skip the first "all" entry. */
10599 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10600 if (strlen (opt->name) == optlen
10601 && strncmp (name, opt->name, optlen) == 0)
10602 {
10603 mcpu_cpu_opt = &opt->value;
10604 if (ext != NULL)
10605 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10606 return;
10607
10608 cpu_variant = *mcpu_cpu_opt;
10609
10610 *input_line_pointer = saved_char;
10611 demand_empty_rest_of_line ();
10612 return;
10613 }
10614
10615 as_bad (_("unknown architecture `%s'\n"), name);
10616 *input_line_pointer = saved_char;
10617 ignore_rest_of_line ();
10618 }
10619
10620 /* Parse a .arch_extension directive. */
10621
10622 static void
10623 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10624 {
10625 char saved_char;
10626 char *ext = input_line_pointer;
10627
10628 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10629 saved_char = *input_line_pointer;
10630 *input_line_pointer = 0;
10631
10632 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10633 return;
10634
10635 cpu_variant = *mcpu_cpu_opt;
10636
10637 *input_line_pointer = saved_char;
10638 demand_empty_rest_of_line ();
10639 }
10640
/* Copy symbol information.  Propagates the AArch64 target-private flag
   word from SRC to DEST so that DEST inherits any target-specific
   annotations attached to SRC.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10648
10649 #ifdef OBJ_ELF
10650 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10651 This is needed so AArch64 specific st_other values can be independently
10652 specified for an IFUNC resolver (that is called by the dynamic linker)
10653 and the symbol it resolves (aliased to the resolver). In particular,
10654 if a function symbol has special st_other value set via directives,
10655 then attaching an IFUNC resolver to that symbol should not override
10656 the st_other setting. Requiring the directive on the IFUNC resolver
10657 symbol would be unexpected and problematic in C code, where the two
10658 symbols appear as two independent function declarations. */
10659
10660 void
10661 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10662 {
10663 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10664 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10665 /* If size is unset, copy size from src. Because we don't track whether
10666 .size has been used, we can't differentiate .size dest, 0 from the case
10667 where dest's size is unset. */
10668 if (!destelf->size && S_GET_SIZE (dest) == 0)
10669 {
10670 if (srcelf->size)
10671 {
10672 destelf->size = XNEW (expressionS);
10673 *destelf->size = *srcelf->size;
10674 }
10675 S_SET_SIZE (dest, S_GET_SIZE (src));
10676 }
10677 }
10678 #endif