]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
aarch64: Add the SME2 FMLA and FMLS instructions
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 /* Types of processor to assemble for. */
42 #ifndef CPU_DEFAULT
43 #define CPU_DEFAULT AARCH64_ARCH_V8
44 #endif
45
46 #define streq(a, b) (strcmp (a, b) == 0)
47
48 #define END_OF_INSN '\0'
49
50 static aarch64_feature_set cpu_variant;
51
52 /* Variables that we set while parsing command-line options. Once all
53 options have been read we re-process these values to set the real
54 assembly flags. */
55 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
56 static const aarch64_feature_set *march_cpu_opt = NULL;
57
58 /* Constants for known architecture features. */
59 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
60
61 /* Currently active instruction sequence. */
62 static aarch64_instr_sequence *insn_sequence = NULL;
63
64 #ifdef OBJ_ELF
65 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
66 static symbolS *GOT_symbol;
67 #endif
68
/* Which ABI to use.  */
enum aarch64_abi_type
{
  /* No ABI selected yet (resolved later from defaults/options).  */
  AARCH64_ABI_NONE = 0,
  /* 64-bit int, long and pointers.  */
  AARCH64_ABI_LP64 = 1,
  /* 32-bit int, long and pointers.  */
  AARCH64_ABI_ILP32 = 2,
  /* 32-bit int and long; 64-bit pointers.  */
  AARCH64_ABI_LLP64 = 3
};
77
78 unsigned int aarch64_sframe_cfa_sp_reg;
79 /* The other CFA base register for SFrame stack trace info. */
80 unsigned int aarch64_sframe_cfa_fp_reg;
81 unsigned int aarch64_sframe_cfa_ra_reg;
82
83 #ifndef DEFAULT_ARCH
84 #define DEFAULT_ARCH "aarch64"
85 #endif
86
87 #ifdef OBJ_ELF
88 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
89 static const char *default_arch = DEFAULT_ARCH;
90 #endif
91
92 /* AArch64 ABI for the output file. */
93 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
94
95 /* When non-zero, program to a 32-bit model, in which the C data types
96 int, long and all pointer types are 32-bit objects (ILP32); or to a
97 64-bit model, in which the C int type is 32-bits but the C long type
98 and all pointer types are 64-bit objects (LP64). */
99 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
100
101 /* When non zero, C types int and long are 32 bit,
102 pointers, however are 64 bit */
103 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
104
/* Element types for vector arrangements, plus the two SVE predicate
   qualifiers (mapped to AARCH64_OPND_QLF_P_Z / AARCH64_OPND_QLF_P_M in
   vectype_to_qualifier).  */
enum vector_el_type
{
  NT_invtype = -1,
  NT_b,
  NT_h,
  NT_s,
  NT_d,
  NT_q,
  NT_zero,
  NT_merge
};
116
117 /* Bits for DEFINED field in vector_type_el. */
118 #define NTA_HASTYPE 1
119 #define NTA_HASINDEX 2
120 #define NTA_HASVARWIDTH 4
121
struct vector_type_el
{
  /* The element type (NT_*).  */
  enum vector_el_type type;
  /* Mask of NTA_* bits recording which of the fields below were
     specified.  */
  unsigned char defined;
  /* NOTE(review): element size — the unit is not evident from this chunk;
     confirm at the points where NTA_HASVARWIDTH is set.  */
  unsigned element_size;
  /* Number of elements in the arrangement.  */
  unsigned width;
  /* Element index, valid when NTA_HASINDEX is set.  */
  int64_t index;
};
130
131 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
132
/* Relocation/fixup information attached to the instruction being
   assembled.  */
struct reloc
{
  /* The relocation type to generate.  */
  bfd_reloc_code_real_type type;
  /* The expression the relocation applies to.  */
  expressionS exp;
  /* Nonzero if the relocation is PC-relative.  */
  int pc_rel;
  /* The operand the relocation belongs to.  */
  enum aarch64_opnd opnd;
  /* FIXUP_F_* flags (e.g. FIXUP_F_HAS_EXPLICIT_SHIFT).  */
  uint32_t flags;
  /* NOTE(review): presumably set when libopcodes is needed to complete
     the encoding for this fixup — confirm at use sites.  */
  unsigned need_libopcodes_p : 1;
};
142
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  aarch64_operand_error parsing_error;
  /* The condition that appears in the assembly line; COND_ALWAYS when
     no conditional field is present.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};
156
157 typedef struct aarch64_instruction aarch64_instruction;
158
159 static aarch64_instruction inst;
160
161 static bool parse_operands (char *, const aarch64_opcode *);
162 static bool programmer_friendly_fixup (aarch64_instruction *);
163
164 /* If an AARCH64_OPDE_SYNTAX_ERROR has no error string, its first three
165 data fields contain the following information:
166
167 data[0].i:
168 A mask of register types that would have been acceptable as bare
169 operands, outside of a register list. In addition, SEF_DEFAULT_ERROR
170 is set if a general parsing error occurred for an operand (that is,
171 an error not related to registers, and having no error string).
172
173 data[1].i:
174 A mask of register types that would have been acceptable inside
175 a register list. In addition, SEF_IN_REGLIST is set if the
176 operand contained a '{' and if we got to the point of trying
177 to parse a register inside a list.
178
179 data[2].i:
180 The mask associated with the register that was actually seen, or 0
181 if none. A nonzero value describes a register inside a register
182 list if data[1].i & SEF_IN_REGLIST, otherwise it describes a bare
183 register.
184
185 The idea is that stringless errors from multiple opcode templates can
186 be ORed together to give a summary of the available alternatives. */
187 #define SEF_DEFAULT_ERROR (1U << 31)
188 #define SEF_IN_REGLIST (1U << 31)
189
190 /* Diagnostics inline function utilities.
191
192 These are lightweight utilities which should only be called by parse_operands
193 and other parsers. GAS processes each assembly line by parsing it against
194 instruction template(s), in the case of multiple templates (for the same
195 mnemonic name), those templates are tried one by one until one succeeds or
196 all fail. An assembly line may fail a few templates before being
197 successfully parsed; an error saved here in most cases is not a user error
198 but an error indicating the current template is not the right template.
199 Therefore it is very important that errors can be saved at a low cost during
200 the parsing; we don't want to slow down the whole parsing by recording
201 non-user errors in detail.
202
203 Remember that the objective is to help GAS pick up the most appropriate
204 error message in the case of multiple templates, e.g. FMOV which has 8
205 templates. */
206
207 static inline void
208 clear_error (void)
209 {
210 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
211 inst.parsing_error.kind = AARCH64_OPDE_NIL;
212 }
213
214 static inline bool
215 error_p (void)
216 {
217 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
218 }
219
220 static inline void
221 set_error (enum aarch64_operand_error_kind kind, const char *error)
222 {
223 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
224 inst.parsing_error.index = -1;
225 inst.parsing_error.kind = kind;
226 inst.parsing_error.error = error;
227 }
228
229 static inline void
230 set_recoverable_error (const char *error)
231 {
232 set_error (AARCH64_OPDE_RECOVERABLE, error);
233 }
234
235 /* Use the DESC field of the corresponding aarch64_operand entry to compose
236 the error message. */
237 static inline void
238 set_default_error (void)
239 {
240 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
241 inst.parsing_error.data[0].i = SEF_DEFAULT_ERROR;
242 }
243
244 static inline void
245 set_expected_error (unsigned int flags)
246 {
247 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
248 inst.parsing_error.data[0].i = flags;
249 }
250
251 static inline void
252 set_syntax_error (const char *error)
253 {
254 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
255 }
256
257 static inline void
258 set_first_syntax_error (const char *error)
259 {
260 if (! error_p ())
261 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
262 }
263
264 static inline void
265 set_fatal_syntax_error (const char *error)
266 {
267 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
268 }
269 \f
270 /* Return value for certain parsers when the parsing fails; those parsers
271 return the information of the parsed result, e.g. register number, on
272 success. */
273 #define PARSE_FAIL -1
274
275 /* This is an invalid condition code that means no conditional field is
276 present. */
277 #define COND_ALWAYS 0x10
278
279 typedef struct
280 {
281 const char *template;
282 uint32_t value;
283 } asm_nzcv;
284
285 struct reloc_entry
286 {
287 char *name;
288 bfd_reloc_code_real_type reloc;
289 };
290
291 /* Macros to define the register types and masks for the purpose
292 of parsing. */
293
294 #undef AARCH64_REG_TYPES
295 #define AARCH64_REG_TYPES \
296 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
297 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
298 BASIC_REG_TYPE(SP_32) /* wsp */ \
299 BASIC_REG_TYPE(SP_64) /* sp */ \
300 BASIC_REG_TYPE(ZR_32) /* wzr */ \
301 BASIC_REG_TYPE(ZR_64) /* xzr */ \
302 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
303 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
304 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
305 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
306 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
307 BASIC_REG_TYPE(V) /* v[0-31] */ \
308 BASIC_REG_TYPE(Z) /* z[0-31] */ \
309 BASIC_REG_TYPE(P) /* p[0-15] */ \
310 BASIC_REG_TYPE(PN) /* pn[0-15] */ \
311 BASIC_REG_TYPE(ZA) /* za */ \
312 BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
313 BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
314 BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
315 BASIC_REG_TYPE(ZT0) /* zt0 */ \
316 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
317 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
318 /* Typecheck: same, plus SVE registers. */ \
319 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
320 | REG_TYPE(Z)) \
321 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
322 MULTI_REG_TYPE(R_ZR, REG_TYPE(R_32) | REG_TYPE(R_64) \
323 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
324 /* Typecheck: same, plus SVE registers. */ \
325 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
326 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) \
327 | REG_TYPE(Z)) \
328 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
329 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
330 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
331 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
332 MULTI_REG_TYPE(R_ZR_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
333 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
334 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
335 /* Typecheck: any [BHSDQ]P FP. */ \
336 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
337 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
338 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
339 MULTI_REG_TYPE(R_ZR_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
340 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
341 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
342 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
343 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
344 be used for SVE instructions, since Zn and Pn are valid symbols \
345 in other contexts. */ \
346 MULTI_REG_TYPE(R_ZR_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
347 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
348 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
349 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
350 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
351 | REG_TYPE(Z) | REG_TYPE(P)) \
352 /* Any integer register; used for error messages only. */ \
353 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
354 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
355 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
356 /* Any vector register. */ \
357 MULTI_REG_TYPE(VZ, REG_TYPE(V) | REG_TYPE(Z)) \
358 /* An SVE vector or predicate register. */ \
359 MULTI_REG_TYPE(ZP, REG_TYPE(Z) | REG_TYPE(P)) \
360 /* Any vector or predicate register. */ \
361 MULTI_REG_TYPE(VZP, REG_TYPE(V) | REG_TYPE(Z) | REG_TYPE(P)) \
362 /* The whole of ZA or a single tile. */ \
363 MULTI_REG_TYPE(ZA_ZAT, REG_TYPE(ZA) | REG_TYPE(ZAT)) \
364 /* A horizontal or vertical slice of a ZA tile. */ \
365 MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV)) \
366 /* Pseudo type to mark the end of the enumerator sequence. */ \
367 END_REG_TYPE(MAX)
368
369 #undef BASIC_REG_TYPE
370 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
371 #undef MULTI_REG_TYPE
372 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
373 #undef END_REG_TYPE
374 #define END_REG_TYPE(T) BASIC_REG_TYPE(T)
375
376 /* Register type enumerators. */
377 typedef enum aarch64_reg_type_
378 {
379 /* A list of REG_TYPE_*. */
380 AARCH64_REG_TYPES
381 } aarch64_reg_type;
382
383 #undef BASIC_REG_TYPE
384 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
385 #undef REG_TYPE
386 #define REG_TYPE(T) (1 << REG_TYPE_##T)
387 #undef MULTI_REG_TYPE
388 #define MULTI_REG_TYPE(T,V) V,
389 #undef END_REG_TYPE
390 #define END_REG_TYPE(T) 0
391
/* Structure for a hash table entry for a register.  */
typedef struct
{
  /* Register name as looked up in aarch64_reg_hsh.  */
  const char *name;
  /* Encoding number of the register.  */
  unsigned char number;
  /* One of the REG_TYPE_* enumerators.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;
  /* NOTE(review): presumably nonzero for predefined registers as opposed
     to user-defined aliases — confirm where entries are created.  */
  unsigned char builtin;
} reg_entry;
400
401 /* Values indexed by aarch64_reg_type to assist the type checking. */
402 static const unsigned reg_type_masks[] =
403 {
404 AARCH64_REG_TYPES
405 };
406
407 #undef BASIC_REG_TYPE
408 #undef REG_TYPE
409 #undef MULTI_REG_TYPE
410 #undef END_REG_TYPE
411 #undef AARCH64_REG_TYPES
412
/* We expected one of the registers in MASK to be specified.  If a register
   of some kind was specified, SEEN is a mask that contains that register,
   otherwise it is zero.

   If it is possible to provide a relatively pithy message that describes
   the error exactly, return a string that does so, reporting the error
   against "operand %d".  Return null otherwise.

   From a QoI perspective, any REG_TYPE_* that is passed as the first
   argument to set_expected_reg_error should generally have its own message.
   Providing messages for combinations of such REG_TYPE_*s can be useful if
   it is possible to summarize the combination in a relatively natural way.
   On the other hand, it seems better to avoid long lists of unrelated
   things.

   All returned strings are N_()-marked templates, translated later when
   the error is finally reported.  */

static const char *
get_reg_expected_msg (unsigned int mask, unsigned int seen)
{
  /* First handle messages that use SEEN.  */
  if ((mask & reg_type_masks[REG_TYPE_ZAT])
      && (seen & reg_type_masks[REG_TYPE_ZATHV]))
    return N_("expected an unsuffixed ZA tile at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_ZATHV])
      && (seen & reg_type_masks[REG_TYPE_ZAT]))
    return N_("missing horizontal or vertical suffix at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_ZA])
      && (seen & (reg_type_masks[REG_TYPE_ZAT]
		  | reg_type_masks[REG_TYPE_ZATHV])))
    return N_("expected 'za' rather than a ZA tile at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_PN])
      && (seen & reg_type_masks[REG_TYPE_P]))
    return N_("expected a predicate-as-counter rather than predicate-as-mask"
	      " register at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_P])
      && (seen & reg_type_masks[REG_TYPE_PN]))
    return N_("expected a predicate-as-mask rather than predicate-as-counter"
	      " register at operand %d");

  /* The remaining cases depend only on MASK.  */
  /* Integer, zero and stack registers.  */
  if (mask == reg_type_masks[REG_TYPE_R_64])
    return N_("expected a 64-bit integer register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_R_ZR])
    return N_("expected an integer or zero register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_R_SP])
    return N_("expected an integer or stack pointer register at operand %d");

  /* Floating-point and SIMD registers.  */
  if (mask == reg_type_masks[REG_TYPE_BHSDQ])
    return N_("expected a scalar SIMD or floating-point register"
	      " at operand %d");
  if (mask == reg_type_masks[REG_TYPE_V])
    return N_("expected an Advanced SIMD vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_Z])
    return N_("expected an SVE vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_P]
      || mask == (reg_type_masks[REG_TYPE_P] | reg_type_masks[REG_TYPE_PN]))
    /* Use this error for "predicate-as-mask only" and "either kind of
       predicate".  We report a more specific error if P is used where
       PN is expected, and vice versa, so the issue at this point is
       "predicate-like" vs. "not predicate-like".  */
    return N_("expected an SVE predicate register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_PN])
    return N_("expected an SVE predicate-as-counter register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_VZ])
    return N_("expected a vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZP])
    return N_("expected an SVE vector or predicate register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_VZP])
    return N_("expected a vector or predicate register at operand %d");

  /* SME-related registers.  */
  if (mask == reg_type_masks[REG_TYPE_ZA])
    return N_("expected a ZA array vector at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_ZA_ZAT] | reg_type_masks[REG_TYPE_ZT0]))
    return N_("expected ZT0 or a ZA mask at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZAT])
    return N_("expected a ZA tile at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZATHV])
    return N_("expected a ZA tile slice at operand %d");

  /* Integer and vector combos.  */
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_V]))
    return N_("expected an integer register or Advanced SIMD vector register"
	      " at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_Z]))
    return N_("expected an integer register or SVE vector register"
	      " at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZ]))
    return N_("expected an integer or vector register at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_P]))
    return N_("expected an integer or predicate register at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZP]))
    return N_("expected an integer, vector or predicate register"
	      " at operand %d");

  /* SVE and SME combos.  */
  if (mask == (reg_type_masks[REG_TYPE_Z] | reg_type_masks[REG_TYPE_ZATHV]))
    return N_("expected an SVE vector register or ZA tile slice"
	      " at operand %d");

  /* No pithy message available; the caller falls back to a generic one.  */
  return NULL;
}
519
520 /* Record that we expected a register of type TYPE but didn't see one.
521 REG is the register that we actually saw, or null if we didn't see a
522 recognized register. FLAGS is SEF_IN_REGLIST if we are parsing the
523 contents of a register list, otherwise it is zero. */
524
525 static inline void
526 set_expected_reg_error (aarch64_reg_type type, const reg_entry *reg,
527 unsigned int flags)
528 {
529 assert (flags == 0 || flags == SEF_IN_REGLIST);
530 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
531 if (flags & SEF_IN_REGLIST)
532 inst.parsing_error.data[1].i = reg_type_masks[type] | flags;
533 else
534 inst.parsing_error.data[0].i = reg_type_masks[type];
535 if (reg)
536 inst.parsing_error.data[2].i = reg_type_masks[reg->type];
537 }
538
539 /* Record that we expected a register list containing registers of type TYPE,
540 but didn't see the opening '{'. If we saw a register instead, REG is the
541 register that we saw, otherwise it is null. */
542
543 static inline void
544 set_expected_reglist_error (aarch64_reg_type type, const reg_entry *reg)
545 {
546 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
547 inst.parsing_error.data[1].i = reg_type_masks[type];
548 if (reg)
549 inst.parsing_error.data[2].i = reg_type_masks[reg->type];
550 }
551
552 /* Some well known registers that we refer to directly elsewhere. */
553 #define REG_SP 31
554 #define REG_ZR 31
555
556 /* Instructions take 4 bytes in the object file. */
557 #define INSN_SIZE 4
558
559 static htab_t aarch64_ops_hsh;
560 static htab_t aarch64_cond_hsh;
561 static htab_t aarch64_shift_hsh;
562 static htab_t aarch64_sys_regs_hsh;
563 static htab_t aarch64_pstatefield_hsh;
564 static htab_t aarch64_sys_regs_ic_hsh;
565 static htab_t aarch64_sys_regs_dc_hsh;
566 static htab_t aarch64_sys_regs_at_hsh;
567 static htab_t aarch64_sys_regs_tlbi_hsh;
568 static htab_t aarch64_sys_regs_sr_hsh;
569 static htab_t aarch64_reg_hsh;
570 static htab_t aarch64_barrier_opt_hsh;
571 static htab_t aarch64_nzcv_hsh;
572 static htab_t aarch64_pldop_hsh;
573 static htab_t aarch64_hint_opt_hsh;
574
575 /* Stuff needed to resolve the label ambiguity
576 As:
577 ...
578 label: <insn>
579 may differ from:
580 ...
581 label:
582 <insn> */
583
584 static symbolS *last_label_seen;
585
586 /* Literal pool structure. Held on a per-section
587 and per-sub-section basis. */
588
589 #define MAX_LITERAL_POOL_SIZE 1024
590 typedef struct literal_expression
591 {
592 expressionS exp;
593 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
594 LITTLENUM_TYPE * bignum;
595 } literal_expression;
596
597 typedef struct literal_pool
598 {
599 literal_expression literals[MAX_LITERAL_POOL_SIZE];
600 unsigned int next_free_entry;
601 unsigned int id;
602 symbolS *symbol;
603 segT section;
604 subsegT sub_section;
605 int size;
606 struct literal_pool *next;
607 } literal_pool;
608
609 /* Pointer to a linked list of literal pools. */
610 static literal_pool *list_of_pools = NULL;
611 \f
612 /* Pure syntax. */
613
614 /* This array holds the chars that always start a comment. If the
615 pre-processor is disabled, these aren't very useful. */
616 const char comment_chars[] = "";
617
618 /* This array holds the chars that only start a comment at the beginning of
619 a line. If the line seems to have the form '# 123 filename'
620 .line and .file directives will appear in the pre-processed output. */
621 /* Note that input_file.c hand checks for '#' at the beginning of the
622 first line of the input file. This is because the compiler outputs
623 #NO_APP at the beginning of its output. */
624 /* Also note that comments like this one will always work. */
625 const char line_comment_chars[] = "#";
626
627 const char line_separator_chars[] = ";";
628
629 /* Chars that can be used to separate mant
630 from exp in floating point numbers. */
631 const char EXP_CHARS[] = "eE";
632
633 /* Chars that mean this number is a floating point constant. */
634 /* As in 0f12.456 */
635 /* or 0d1.2345e12 */
636
637 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
638
639 /* Prefix character that indicates the start of an immediate value. */
640 #define is_immediate_prefix(C) ((C) == '#')
641
642 /* Separator character handling. */
643
644 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
645
/* If the next character of *STR is C, consume it and return TRUE;
   otherwise leave *STR untouched and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
657
658 #define skip_past_comma(str) skip_past_char (str, ',')
659
660 /* Arithmetic expressions (possibly involving symbols). */
661
662 static bool in_aarch64_get_expression = false;
663
664 /* Third argument to aarch64_get_expression. */
665 #define GE_NO_PREFIX false
666 #define GE_OPT_PREFIX true
667
668 /* Fourth argument to aarch64_get_expression. */
669 #define ALLOW_ABSENT false
670 #define REJECT_ABSENT true
671
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.  On both paths *STR is
   advanced past the consumed text.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.  */

static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () works on input_line_pointer, so temporarily point it
     at *STR; in_aarch64_get_expression tells md_operand () to flag bad
     expressions as O_illegal instead of reporting them itself.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* With an explicit '#' prefix this must be an expression, so the
	 error is fatal; otherwise keep any earlier, more specific error.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
738
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Delegate all encoding work to the generic IEEE helper, using the
     target's configured endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
749
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * exp)
{
  /* Only intervene when called back from aarch64_get_expression; mark
     the result illegal so that caller can emit its own diagnostic.  */
  if (in_aarch64_get_expression)
    exp->X_op = O_illegal;
}
758
759 /* Immediate values. */
760
761 /* Errors may be set multiple times during parsing or bit encoding
762 (particularly in the Neon bits), but usually the earliest error which is set
763 will be the most meaningful. Avoid overwriting it with later (cascading)
764 errors by calling this function. */
765
/* Record MSG as a syntax error unless an earlier (and therefore more
   meaningful) error has already been recorded.  */
static void
first_error (const char *msg)
{
  if (error_p ())
    return;
  set_syntax_error (msg);
}
772
/* Similar to first_error, but this function accepts formatted error
   message.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  /* As in first_error, keep only the earliest diagnostic.  */
  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message is expected to fit within the buffer.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
797
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL
   (and records a syntax error via first_error) if *VECTYPE does not
   describe a valid arrangement.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Narrowest vector qualifier for each element type; the starting point
     for the offset calculation below.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predicate qualifiers map directly.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-bit, 64-bit and 128-bit total sizes are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
872
873 /* Register parsing. */
874
875 /* Generic register parser which is called by other specialized
876 register parsers.
877 CCP points to what should be the beginning of a register name.
878 If it is indeed a valid register name, advance CCP over it and
879 return the reg_entry structure; otherwise return NULL.
880 It does not issue diagnostics. */
881
882 static reg_entry *
883 parse_reg (char **ccp)
884 {
885 char *start = *ccp;
886 char *p;
887 reg_entry *reg;
888
889 #ifdef REGISTER_PREFIX
890 if (*start != REGISTER_PREFIX)
891 return NULL;
892 start++;
893 #endif
894
895 p = start;
896 if (!ISALPHA (*p) || !is_name_beginner (*p))
897 return NULL;
898
899 do
900 p++;
901 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
902
903 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
904
905 if (!reg)
906 return NULL;
907
908 *ccp = p;
909 return reg;
910 }
911
912 /* Return the operand qualifier associated with all uses of REG, or
913 AARCH64_OPND_QLF_NIL if none. AARCH64_OPND_QLF_NIL means either
914 that qualifiers don't apply to REG or that qualifiers are added
915 using suffixes. */
916
917 static aarch64_opnd_qualifier_t
918 inherent_reg_qualifier (const reg_entry *reg)
919 {
920 switch (reg->type)
921 {
922 case REG_TYPE_R_32:
923 case REG_TYPE_SP_32:
924 case REG_TYPE_ZR_32:
925 return AARCH64_OPND_QLF_W;
926
927 case REG_TYPE_R_64:
928 case REG_TYPE_SP_64:
929 case REG_TYPE_ZR_64:
930 return AARCH64_OPND_QLF_X;
931
932 case REG_TYPE_FP_B:
933 case REG_TYPE_FP_H:
934 case REG_TYPE_FP_S:
935 case REG_TYPE_FP_D:
936 case REG_TYPE_FP_Q:
937 return AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);
938
939 default:
940 return AARCH64_OPND_QLF_NIL;
941 }
942 }
943
944 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
945 return FALSE. */
946 static bool
947 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
948 {
949 return (reg_type_masks[type] & (1 << reg->type)) != 0;
950 }
951
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    case REG_TYPE_Z:
      /* An SVE Z register is only accepted if REG_TYPE allows it, and
	 it must carry an explicit ".s" or ".d" size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_Z)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the two-character ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      /* Anything else must be a general register (W/X forms,
	 including SP and ZR); its qualifier is implied by its name.  */
      if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR_SP))
	return NULL;
      *qualifier = inherent_reg_qualifier (reg);
      break;
    }

  /* Commit the parse position only on success.  */
  *ccp = str;

  return reg;
}
1000
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Thin wrapper around aarch64_addr_reg_parse, accepting the
     REG_TYPE_R_ZR_SP set of general registers.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_ZR_SP, qualifier);
}
1012
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* Only AdvSIMD (V) registers take a leading element count; for other
     register types, or a bare element-size suffix, WIDTH stays 0.  */
  if (reg_type != REG_TYPE_V || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are accepted for non-V registers (WIDTH is 0
	 there) and for the AdvSIMD "1q" form only.  */
      if (reg_type != REG_TYPE_V || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* A non-zero count must describe a full 64- or 128-bit vector, or
     one of the short 2h/4b forms.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;
  parsed_type->element_size = element_size;

  *str = ptr;

  return true;
}
1099
1100 /* *STR contains an SVE zero/merge predication suffix. Parse it into
1101 *PARSED_TYPE and point *STR at the end of the suffix. */
1102
1103 static bool
1104 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
1105 {
1106 char *ptr = *str;
1107
1108 /* Skip '/'. */
1109 gas_assert (*ptr == '/');
1110 ptr++;
1111 switch (TOLOWER (*ptr))
1112 {
1113 case 'z':
1114 parsed_type->type = NT_zero;
1115 break;
1116 case 'm':
1117 parsed_type->type = NT_merge;
1118 break;
1119 default:
1120 if (*ptr != '\0' && *ptr != ',')
1121 first_error_fmt (_("unexpected character `%c' in predication type"),
1122 *ptr);
1123 else
1124 first_error (_("missing predication type"));
1125 return false;
1126 }
1127 parsed_type->width = 0;
1128 *str = ptr + 1;
1129 return true;
1130 }
1131
1132 /* Return true if CH is a valid suffix character for registers of
1133 type TYPE. */
1134
1135 static bool
1136 aarch64_valid_suffix_char_p (aarch64_reg_type type, char ch)
1137 {
1138 switch (type)
1139 {
1140 case REG_TYPE_V:
1141 case REG_TYPE_Z:
1142 case REG_TYPE_ZA:
1143 case REG_TYPE_ZAT:
1144 case REG_TYPE_ZATH:
1145 case REG_TYPE_ZATV:
1146 return ch == '.';
1147
1148 case REG_TYPE_P:
1149 case REG_TYPE_PN:
1150 return ch == '.' || ch == '/';
1151
1152 default:
1153 return false;
1154 }
1155 }
1156
1157 /* Parse an index expression at *STR, storing it in *IMM on success. */
1158
1159 static bool
1160 parse_index_expression (char **str, int64_t *imm)
1161 {
1162 expressionS exp;
1163
1164 aarch64_get_expression (&exp, str, GE_NO_PREFIX, REJECT_ABSENT);
1165 if (exp.X_op != O_constant)
1166 {
1167 first_error (_("constant expression required"));
1168 return false;
1169 }
1170 *imm = exp.X_add_number;
1171 return true;
1172 }
1173
/* Parse a register of the type TYPE.

   Return null if the string pointed to by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register, and optionally return the register
   shape and element index information in *TYPEINFO.

   FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list.

   FLAGS includes PTR_FULL_REG if the function should ignore any potential
   register index.

   FLAGS includes PTR_GOOD_MATCH if we are sufficiently far into parsing
   an operand that we can be confident that it is a good match.  */

#define PTR_IN_REGLIST (1U << 0)
#define PTR_FULL_REG (1U << 1)
#define PTR_GOOD_MATCH (1U << 2)

static const reg_entry *
parse_typed_reg (char **ccp, aarch64_reg_type type,
		 struct vector_type_el *typeinfo, unsigned int flags)
{
  char *str = *ccp;
  bool isalpha = ISALPHA (*str);
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;
  unsigned int err_flags = (flags & PTR_IN_REGLIST) ? SEF_IN_REGLIST : 0;

  /* Start from an "undefined" shape: no type, no width, no index.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.element_size = 0;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      /* Pick the most specific diagnostic for the failed parse.  */
      if (!isalpha && (flags & PTR_IN_REGLIST))
	set_fatal_syntax_error (_("syntax error in register list"));
      else if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }
  /* From here on, work with the specific type of the parsed register.  */
  type = reg->type;

  if (aarch64_valid_suffix_char_p (reg->type, *str))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return NULL;
	  /* For ZA tiles the tile number is bounded by the element
	     size: a tile ZAn.<T> is rejected unless n * 8 is below the
	     element size in bits.  */
	  if ((reg->type == REG_TYPE_ZAT
	       || reg->type == REG_TYPE_ZATH
	       || reg->type == REG_TYPE_ZATV)
	      && reg->number * 8 >= parsetype.element_size)
	    {
	      set_syntax_error (_("ZA tile number out of range"));
	      return NULL;
	    }
	}
      else
	{
	  /* A '/' suffix: zeroing/merging predication.  */
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return NULL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type != REG_TYPE_V)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
    {
      /* Reject Sn[index] syntax.  */
      if (reg->type != REG_TYPE_Z
	  && reg->type != REG_TYPE_PN
	  && reg->type != REG_TYPE_ZT0
	  && !is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return NULL;
	}

      if (flags & PTR_IN_REGLIST)
	{
	  first_error (_("index not allowed inside register list"));
	  return NULL;
	}

      atype.defined |= NTA_HASINDEX;

      if (!parse_index_expression (&str, &atype.index))
	return NULL;

      if (! skip_past_char (&str, ']'))
	return NULL;
    }
  /* Outside a register list, a suffix that demanded an index (a bare
     Vn.<T>) must actually have been followed by one.  */
  else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return NULL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_V && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  *ccp = str;

  return reg;
}
1326
/* Parse register.

   Return the register on success; return null otherwise.

   If this is a NEON vector register with additional type information, fill
   in the struct pointed to by VECTYPE (if non-NULL).

   This parser does not handle register lists.  */

static const reg_entry *
aarch64_reg_parse (char **ccp, aarch64_reg_type type,
		   struct vector_type_el *vectype)
{
  /* No flags: not inside a register list, and indices are honoured.  */
  return parse_typed_reg (ccp, type, vectype, 0);
}
1342
1343 static inline bool
1344 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1345 {
1346 return (e1.type == e2.type
1347 && e1.defined == e2.defined
1348 && e1.width == e2.width
1349 && e1.element_size == e2.element_size
1350 && e1.index == e2.index);
1351 }
1352
1353 /* Return the register number mask for registers of type REG_TYPE. */
1354
1355 static inline int
1356 reg_type_mask (aarch64_reg_type reg_type)
1357 {
1358 return reg_type == REG_TYPE_P ? 15 : 31;
1359 }
1360
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range, mask;
  int in_range;
  int ret_val;
  bool error = false;
  bool expect_index = false;
  unsigned int ptr_flags = PTR_IN_REGLIST;

  /* The list must be enclosed in braces.  */
  if (*str != '{')
    {
      set_expected_reglist_error (type, parse_reg (&str));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.element_size = 0;
  typeinfo_first.index = 0;
  ret_val = 0;
  /* VAL is the most recently parsed register number; VAL_RANGE is the
     lower bound while expanding a "Vx-Vy" range; IN_RANGE is set while
     parsing the upper end of such a range.  */
  val = -1;
  val_range = -1;
  in_range = 0;
  mask = reg_type_mask (type);
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
					      ptr_flags);
      if (!reg)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      val = reg->number;
      /* reject [bhsd]n */
      if (type == REG_TYPE_V && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      /* Any element carrying an index means the whole list must be
	 followed by an index after the closing brace.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* A range whose start and end are the same register is
	     rejected; wrap-around ranges are handled by the modular
	     increment below.  */
	  if (val == val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range = (val_range + 1) & mask;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  /* All subsequent registers must share the first one's shape
	     and index.  */
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      if (! error)
	/* Fold every register from VAL_RANGE through VAL into the
	   encoded result, five bits per register.  */
	for (;;)
	  {
	    ret_val |= val_range << (5 * nb_regs);
	    nb_regs++;
	    if (val_range == val)
	      break;
	    val_range = (val_range + 1) & mask;
	  }
      in_range = 0;
      ptr_flags |= PTR_GOOD_MATCH;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  if (!parse_index_expression (&str, &typeinfo_first.index))
	    error = true;
	  if (! skip_past_char (&str, ']'))
	    error = true;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1520
/* Directives: register aliases.  */

/* Create an alias named STR for register NUMBER of type TYPE and enter
   it into the register hash table.  Return the new entry, or NULL
   (with a warning where appropriate) if STR already names a register
   or alias.  */

static reg_entry *
insert_reg_alias (char *str, int number, aarch64_reg_type type)
{
  reg_entry *new;
  const char *name;

  if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
    {
      if (new->builtin)
	as_warn (_("ignoring attempt to redefine built-in register '%s'"),
		 str);

      /* Only warn about a redefinition if it's not defined as the
	 same register.  */
      else if (new->number != number || new->type != type)
	as_warn (_("ignoring redefinition of register alias '%s'"), str);

      return NULL;
    }

  /* The hash table keeps a pointer to the name, so make a durable copy.  */
  name = xstrdup (str);
  new = XNEW (reg_entry);

  new->name = name;
  new->number = number;
  new->type = type;
  new->builtin = false;

  str_hash_insert (aarch64_reg_hsh, name, new, 0);

  return new;
}
1555
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the upper-case variant if it differs from the stated
	 name.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise for the lower-case variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1635
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  If it is reached,
   ".req" appeared in directive position, which is a syntax error.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1643
1644 /* The .unreq directive deletes an alias which was previously defined
1645 by .req. For example:
1646
1647 my_alias .req r11
1648 .unreq my_alias */
1649
1650 static void
1651 s_unreq (int a ATTRIBUTE_UNUSED)
1652 {
1653 char *name;
1654 char saved_char;
1655
1656 name = input_line_pointer;
1657 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1658 saved_char = *input_line_pointer;
1659 *input_line_pointer = 0;
1660
1661 if (!*name)
1662 as_bad (_("invalid syntax for .unreq directive"));
1663 else
1664 {
1665 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1666
1667 if (!reg)
1668 as_bad (_("unknown register alias '%s'"), name);
1669 else if (reg->builtin)
1670 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1671 name);
1672 else
1673 {
1674 char *p;
1675 char *nbuf;
1676
1677 str_hash_delete (aarch64_reg_hsh, name);
1678 free ((char *) reg->name);
1679 free (reg);
1680
1681 /* Also locate the all upper case and all lower case versions.
1682 Do not complain if we cannot find one or the other as it
1683 was probably deleted above. */
1684
1685 nbuf = strdup (name);
1686 for (p = nbuf; *p; p++)
1687 *p = TOUPPER (*p);
1688 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1689 if (reg)
1690 {
1691 str_hash_delete (aarch64_reg_hsh, nbuf);
1692 free ((char *) reg->name);
1693 free (reg);
1694 }
1695
1696 for (p = nbuf; *p; p++)
1697 *p = TOLOWER (*p);
1698 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1699 if (reg)
1700 {
1701 str_hash_delete (aarch64_reg_hsh, nbuf);
1702 free ((char *) reg->name);
1703 free (reg);
1704 }
1705
1706 free (nbuf);
1707 }
1708 }
1709
1710 *input_line_pointer = saved_char;
1711 demand_empty_rest_of_line ();
1712 }
1713
1714 /* Directives: Instruction set selection. */
1715
1716 #if defined OBJ_ELF || defined OBJ_COFF
1717 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1718 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1719 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1720 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1721
/* Create a new mapping symbol for the transition to STATE, placed at
   offset VALUE within FRAG.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Pick the AArch64 mapping-symbol name for STATE: "$d" for data,
     "$x" for A64 instructions.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Replace, rather than duplicate, a symbol at this exact offset.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1777
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The removed symbol was also the first one in this frag.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the BYTES of padding as data, then resume STATE after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1805
1806 static void mapping_state_2 (enum mstate state, int max_chars);
1807
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	/* Everything emitted so far in this section was data.  */
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1849
1850 /* Same as mapping_state, but MAX_CHARS bytes have already been
1851 allocated. Put the mapping symbol that far back. */
1852
1853 static void
1854 mapping_state_2 (enum mstate state, int max_chars)
1855 {
1856 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1857
1858 if (!SEG_NORMAL (now_seg))
1859 return;
1860
1861 if (mapstate == state)
1862 /* The mapping symbol has already been emitted.
1863 There is nothing else to do. */
1864 return;
1865
1866 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1867 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1868 }
1869 #else
1870 #define mapping_state(x) /* nothing */
1871 #define mapping_state_2(x, y) /* nothing */
1872 #endif
1873
1874 /* Directives: sectioning and alignment. */
1875
/* Implement the .bss directive: switch to the BSS section.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  /* Anything placed in .bss is data as far as mapping symbols go.  */
  mapping_state (MAP_DATA);
}
1885
/* Implement the .even directive: align to a 2-byte (2^1) boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1897
1898 /* Directives: Literal pools. */
1899
1900 static literal_pool *
1901 find_literal_pool (int size)
1902 {
1903 literal_pool *pool;
1904
1905 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1906 {
1907 if (pool->section == now_seg
1908 && pool->sub_section == now_subseg && pool->size == size)
1909 break;
1910 }
1911
1912 return pool;
1913 }
1914
/* Return the literal pool of element size SIZE for the current
   (sub)section, creating and registering it first if necessary.  Also
   (re)creates the pool's label symbol when the pool is new or was
   emptied by a previous .ltorg.  */
static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = XNEW (literal_pool);
      /* NOTE(review): XNEW is presumably xmalloc-backed and aborts on
	 allocation failure, making this check unreachable — kept as a
	 defensive measure; callers must still cope with a NULL return.  */
      if (!pool)
	return NULL;

      /* Currently we always put the literal pool in the current text
	 section.  If we were generating "small" model code where we
	 knew that all code and initialised data was within 1MB then
	 we could output literals to mergeable, read-only data
	 sections. */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    &zero_address_frag, 0);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}
1959
1960 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1961 Return TRUE on success, otherwise return FALSE. */
1962 static bool
1963 add_to_lit_pool (expressionS *exp, int size)
1964 {
1965 literal_pool *pool;
1966 unsigned int entry;
1967
1968 pool = find_or_make_literal_pool (size);
1969
1970 /* Check if this literal value is already in the pool. */
1971 for (entry = 0; entry < pool->next_free_entry; entry++)
1972 {
1973 expressionS * litexp = & pool->literals[entry].exp;
1974
1975 if ((litexp->X_op == exp->X_op)
1976 && (exp->X_op == O_constant)
1977 && (litexp->X_add_number == exp->X_add_number)
1978 && (litexp->X_unsigned == exp->X_unsigned))
1979 break;
1980
1981 if ((litexp->X_op == exp->X_op)
1982 && (exp->X_op == O_symbol)
1983 && (litexp->X_add_number == exp->X_add_number)
1984 && (litexp->X_add_symbol == exp->X_add_symbol)
1985 && (litexp->X_op_symbol == exp->X_op_symbol))
1986 break;
1987 }
1988
1989 /* Do we need to create a new entry? */
1990 if (entry == pool->next_free_entry)
1991 {
1992 if (entry >= MAX_LITERAL_POOL_SIZE)
1993 {
1994 set_syntax_error (_("literal pool overflow"));
1995 return false;
1996 }
1997
1998 pool->literals[entry].exp = *exp;
1999 pool->next_free_entry += 1;
2000 if (exp->X_op == O_big)
2001 {
2002 /* PR 16688: Bignums are held in a single global array. We must
2003 copy and preserve that value now, before it is overwritten. */
2004 pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
2005 exp->X_add_number);
2006 memcpy (pool->literals[entry].bignum, generic_bignum,
2007 CHARS_PER_LITTLENUM * exp->X_add_number);
2008 }
2009 else
2010 pool->literals[entry].bignum = NULL;
2011 }
2012
2013 exp->X_op = O_symbol;
2014 exp->X_add_number = ((int) entry) * size;
2015 exp->X_add_symbol = pool->symbol;
2016
2017 return true;
2018 }
2019
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.
   Bind the pre-created SYMBOLP to NAME, SEGMENT, VALU and FRAG and
   append it to the symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME (including its terminating NUL) into the notes obstack
     so the symbol owns a stable copy.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
2070
2071
/* Implement the .ltorg directive: emit every non-empty literal pool
   (element sizes 4, 8 and 16 bytes, i.e. align = 2..4) at the current
   location, then mark the pools empty for reuse.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* The emitted pool contents are data, not instructions.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 byte in the name presumably keeps it out of the user
	 symbol namespace; the %x is the pool's unique id.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Bind the pool's deferred label symbol to this position.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
2130
2131 #if defined(OBJ_ELF) || defined(OBJ_COFF)
2132 /* Forward declarations for functions below, in the MD interface
2133 section. */
2134 static struct reloc_table_entry * find_reloc_table_entry (char **);
2135
2136 /* Directives: Data. */
2137 /* N.B. the support for relocation suffix in this directive needs to be
2138 implemented properly. */
2139
/* Implement the ".word"/".long" (NBYTES == 4) and ".xword"/".dword"
   (NBYTES == 8) directives: emit each comma-separated expression as an
   NBYTES-wide datum.  A ":reloc_op:" suffix on a symbolic operand is
   recognised but not yet supported and is diagnosed as such.  */

static void
s_aarch64_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* A bare directive with no operands is accepted and emits nothing.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* The directive emits data; record the mapping-state transition.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional '#:reloc_op:' prefix on the operand.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      /* Distinguish an unknown modifier from a known one that
		 is simply not implemented for this directive yet.  */
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2191 #endif
2192
2193 #ifdef OBJ_ELF
2194 /* Forward declarations for functions below, in the MD interface
2195 section. */
2196 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
2197
2198 /* Mark symbol that it follows a variant PCS convention. */
2199
2200 static void
2201 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
2202 {
2203 char *name;
2204 char c;
2205 symbolS *sym;
2206 asymbol *bfdsym;
2207 elf_symbol_type *elfsym;
2208
2209 c = get_symbol_name (&name);
2210 if (!*name)
2211 as_bad (_("Missing symbol name in directive"));
2212 sym = symbol_find_or_make (name);
2213 restore_line_pointer (c);
2214 demand_empty_rest_of_line ();
2215 bfdsym = symbol_get_bfdsym (sym);
2216 elfsym = elf_symbol_from (bfdsym);
2217 gas_assert (elfsym);
2218 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2219 }
2220 #endif /* OBJ_ELF */
2221
2222 /* Output a 32-bit word, but mark as an instruction. */
2223
/* Implement the ".inst" directive: output each comma-separated constant
   expression as a raw 32-bit instruction word, marked as code rather
   than data.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;	/* Number of instruction words emitted.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* A bare ".inst" with no operands emits nothing.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* AArch64 instruction encodings are little-endian regardless of
	 data endianness; pre-swap so emit_expr's target-endian output
	 ends up in instruction byte order.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Tell the DWARF line-number machinery how much code was emitted.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2278
/* Implement ".cfi_b_key_frame": record in the current CFI FDE that this
   frame uses the B pointer-authentication key.  */

static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2286
2287 #ifdef OBJ_ELF
2288 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2289
/* Implement ".tlsdescadd": attach a BFD_RELOC_AARCH64_TLSDESC_ADD fixup
   at the current location, annotating the ADD instruction that is
   expected to follow immediately.  */

static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fixup offset stays within the current frag;
     this only works if the annotated instruction follows directly.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2302
2303 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2304
/* Implement ".tlsdesccall": attach a BFD_RELOC_AARCH64_TLSDESC_CALL
   fixup at the current location, annotating the BLR instruction that is
   expected to follow immediately.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2322
2323 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2324
/* Implement ".tlsdescldr": attach a BFD_RELOC_AARCH64_TLSDESC_LDR fixup
   at the current location, annotating the LDR instruction that is
   expected to follow immediately.  */

static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fixup offset stays within the current frag;
     this only works if the annotated instruction follows directly.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2337 #endif /* OBJ_ELF */
2338
2339 #ifdef TE_PE
/* Implement ".secrel32" (PE targets): emit each comma-separated
   expression as a 32-bit section-relative value.  */

static void
s_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      /* Turn symbolic operands into section-relative form so that
	 emit_expr produces the corresponding PE relocation.  */
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put the list terminator back into the input stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2358
/* Emit the SIZE-byte section-relative offset of SYMBOL; used by the
   DWARF machinery on PE targets.  */

void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  /* Build a section-relative expression referencing SYMBOL with no
     addend, then emit it.  */
  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
2369
/* Implement ".secidx" (PE targets): emit each comma-separated
   expression as a 16-bit section-index value.  */

static void
s_secidx (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      /* Turn symbolic operands into section-index form so that
	 emit_expr produces the corresponding PE relocation.  */
      if (exp.X_op == O_symbol)
	exp.X_op = O_secidx;

      emit_expr (&exp, 2);
    }
  while (*input_line_pointer++ == ',');

  /* Put the list terminator back into the input stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2388 #endif /* TE_PE */
2389
2390 static void s_aarch64_arch (int);
2391 static void s_aarch64_cpu (int);
2392 static void s_aarch64_arch_extension (int);
2393
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
   pseudo-op name without dot
   function to call to execute this pseudo-op
   Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},		/* Alias for .ltorg.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  {"word", s_aarch64_cons, 4},		/* 4-byte data.  */
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},		/* 8-byte data.  */
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  {"float16", float_cons, 'h'},		/* Half-precision constants.  */
  {"bfloat16", float_cons, 'b'},	/* BFloat16 constants.  */
  {0, 0, 0}				/* Sentinel.  */
};
2433 \f
2434
2435 /* Check whether STR points to a register name followed by a comma or the
2436 end of line; REG_TYPE indicates which register types are checked
2437 against. Return TRUE if STR is such a register name; otherwise return
2438 FALSE. The function does not intend to produce any diagnostics, but since
2439 the register parser aarch64_reg_parse, which is called by this function,
2440 does produce diagnostics, we call clear_error to clear any diagnostics
2441 that may be generated by aarch64_reg_parse.
2442 Also, the function returns FALSE directly if there is any user error
2443 present at the function entry. This prevents the existing diagnostics
2444 state from being spoiled.
2445 The function currently serves parse_constant_immediate and
2446 parse_big_immediate only. */
2447 static bool
2448 reg_name_p (char *str, aarch64_reg_type reg_type)
2449 {
2450 const reg_entry *reg;
2451
2452 /* Prevent the diagnostics state from being spoiled. */
2453 if (error_p ())
2454 return false;
2455
2456 reg = aarch64_reg_parse (&str, reg_type, NULL);
2457
2458 /* Clear the parsing error that may be set by the reg parser. */
2459 clear_error ();
2460
2461 if (!reg)
2462 return false;
2463
2464 skip_whitespace (str);
2465 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2466 return true;
2467
2468 return false;
2469 }
2470
2471 /* Parser functions used exclusively in instruction operands. */
2472
2473 /* Parse an immediate expression which may not be constant.
2474
2475 To prevent the expression parser from pushing a register name
2476 into the symbol table as an undefined symbol, firstly a check is
2477 done to find out whether STR is a register of type REG_TYPE followed
2478 by a comma or the end of line. Return FALSE if STR is such a string. */
2479
2480 static bool
2481 parse_immediate_expression (char **str, expressionS *exp,
2482 aarch64_reg_type reg_type)
2483 {
2484 if (reg_name_p (*str, reg_type))
2485 {
2486 set_recoverable_error (_("immediate operand required"));
2487 return false;
2488 }
2489
2490 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2491
2492 if (exp->X_op == O_absent)
2493 {
2494 set_fatal_syntax_error (_("missing immediate expression"));
2495 return false;
2496 }
2497
2498 return true;
2499 }
2500
2501 /* Constant immediate-value read function for use in insn parsing.
2502 STR points to the beginning of the immediate (with the optional
2503 leading #); *VAL receives the value. REG_TYPE says which register
2504 names should be treated as registers rather than as symbolic immediates.
2505
2506 Return TRUE on success; otherwise return FALSE. */
2507
2508 static bool
2509 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2510 {
2511 expressionS exp;
2512
2513 if (! parse_immediate_expression (str, &exp, reg_type))
2514 return false;
2515
2516 if (exp.X_op != O_constant)
2517 {
2518 set_syntax_error (_("constant expression required"));
2519 return false;
2520 }
2521
2522 *val = exp.X_add_number;
2523 return true;
2524 }
2525
/* Extract the 8-bit AArch64 floating-point immediate field from the
   IEEE single-precision bit pattern IMM: bit 31 (the sign) maps to
   bit 7 of the result, and bits 25:19 map to bits 6:0.  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t exp_mant = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> (31 - 7)) & 0x80;	/* b[31] -> b[7].  */

  return sign | exp_mant;
}
2532
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* A single-precision value is representable iff its bits look like

	3 32222222 2221111111111
	1 09876543 21098765432109876543210
	n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~e.  That is: the low 19 bits must be zero, and bits 29-25
     must all equal the complement of bit 30.  */
  uint32_t expected = (imm & 0x40000000) != 0 ? 0x40000000 : 0x3e000000;

  return (imm & 0x7ffff) == 0		/* Lower 19 bits are 0.  */
	 && (imm & 0x7e000000) == expected;
}
2565
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* A double-precision value converts exactly when it has the pattern

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
     if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is
     the inverse of E.  */

  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;
  uint32_t expected;

  /* The bottom 29 mantissa bits would be dropped; they must be 0.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* The three exponent bits below the sign (the '~~~') must each be
     the complement of E.  */
  expected = (high32 & 0x40000000) != 0 ? 0x40000000 : 0x38000000;
  if ((high32 & 0x78000000) != expected)
    return false;

  /* Reject exponents that would land on the float's all-ones exponent:
     Eeee_eeee == 1111_1111.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	    | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	    | (low32 >> 29);			/* 3 S bits.  */
  return true;
}
2613
/* Return true if we should treat OPERAND as a double-precision
   floating-point operand rather than a single-precision one.  */
static bool
double_precision_operand_p (const aarch64_opnd_info *operand)
{
  /* Check for unsuffixed SVE registers, which are allowed
     for LDR and STR but not in instructions that require an
     immediate.  We get better error messages if we arbitrarily
     pick one size, parse the immediate normally, and then
     report the match failure in the normal way.  */
  /* An element size of 8 bytes corresponds to a 64-bit (double)
     qualifier.  */
  return (operand->qualifier == AARCH64_OPND_QLF_NIL
	  || aarch64_get_qualifier_esize (operand->qualifier) == 8);
}
2627
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* The '#' immediate prefix is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* A double-precision pattern must convert to a float without
	     loss; otherwise it cannot be encoded here.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision pattern cannot exceed 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name is never a valid immediate.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Parse a decimal floating-point literal into single-precision
	 littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2703
/* Less-generic immediate-value read function with the possibility of loading
   a big (64-bit) immediate, as required by AdvSIMD Modified immediate
   instructions.

   To prevent the expression parser from pushing a register name into the
   symbol table as an undefined symbol, a check is firstly done to find
   out whether STR is a register of type REG_TYPE followed by a comma or
   the end of line.  Return FALSE if STR is such a register.

   Note: the parsed expression is left in the global inst.reloc.exp, and
   *IMM is only written when that expression is a constant — a
   non-constant expression still returns TRUE but leaves *IMM
   untouched.  */

static bool
parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
{
  char *ptr = *str;

  if (reg_name_p (ptr, reg_type))
    {
      set_syntax_error (_("immediate operand required"));
      return false;
    }

  aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);

  if (inst.reloc.exp.X_op == O_constant)
    *imm = inst.reloc.exp.X_add_number;

  *str = ptr;

  return true;
}
2733
2734 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2735 if NEED_LIBOPCODES is non-zero, the fixup will need
2736 assistance from the libopcodes. */
2737
2738 static inline void
2739 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2740 const aarch64_opnd_info *operand,
2741 int need_libopcodes_p)
2742 {
2743 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2744 reloc->opnd = operand->type;
2745 if (need_libopcodes_p)
2746 reloc->need_libopcodes_p = 1;
2747 };
2748
/* Return TRUE if the instruction currently being processed (the global
   "inst") needs to be fixed up later internally by GAS; otherwise
   return FALSE.  */

static inline bool
aarch64_gas_internal_fixup_p (void)
{
  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
2757
2758 /* Assign the immediate value to the relevant field in *OPERAND if
2759 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2760 needs an internal fixup in a later stage.
2761 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2762 IMM.VALUE that may get assigned with the constant. */
2763 static inline void
2764 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2765 aarch64_opnd_info *operand,
2766 int addr_off_p,
2767 int need_libopcodes_p,
2768 int skip_p)
2769 {
2770 if (reloc->exp.X_op == O_constant)
2771 {
2772 if (addr_off_p)
2773 operand->addr.offset.imm = reloc->exp.X_add_number;
2774 else
2775 operand->imm.value = reloc->exp.X_add_number;
2776 reloc->type = BFD_RELOC_UNUSED;
2777 }
2778 else
2779 {
2780 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2781 /* Tell libopcodes to ignore this operand or not. This is helpful
2782 when one of the operands needs to be fixed up later but we need
2783 libopcodes to check the other operands. */
2784 operand->skip = skip_p;
2785 }
2786 }
2787
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  /* Modifier name, without the surrounding colons.  */
  const char *name;
  /* Non-zero if the relocation is PC-relative.  */
  int pc_rel;
  /* Relocation selected when the modifier is used with each kind of
     consuming instruction; 0 means invalid in that context.  */
  bfd_reloc_code_real_type adr_type;		/* ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* ADRP.  */
  bfd_reloc_code_real_type movw_type;		/* MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* ADD (immediate).  */
  bfd_reloc_code_real_type ldst_type;		/* Load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* LDR (literal).  */
};
2807
static struct reloc_table_entry reloc_table[] =
{
  /* After the name and pc_rel flag, each entry lists the relocations
     for the adr, adrp, movw, add, ldst and ld_literal contexts, in
     that order; 0 means the modifier is invalid in that context.  */

  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3336
3337 /* Given the address of a pointer pointing to the textual name of a
3338 relocation as may appear in assembler source, attempt to find its
3339 details in reloc_table. The pointer will be updated to the character
3340 after the trailing colon. On failure, NULL will be returned;
3341 otherwise return the reloc_table_entry. */
3342
3343 static struct reloc_table_entry *
3344 find_reloc_table_entry (char **str)
3345 {
3346 unsigned int i;
3347 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3348 {
3349 int length = strlen (reloc_table[i].name);
3350
3351 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3352 && (*str)[length] == ':')
3353 {
3354 *str += (length + 1);
3355 return &reloc_table[i];
3356 }
3357 }
3358
3359 return NULL;
3360 }
3361
/* Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK.  TYPE is a BFD_RELOC_* code; this is the policy
   helper behind aarch64_force_relocation below.  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    /* GOT, TLS GD/LD/IE/LE and TLSDESC relocations: their final
       values depend on linker-created tables (GOT, TLS blocks), so
       the assembler can never resolve them itself.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      return -1;
    }
}
3464
3465 int
3466 aarch64_force_relocation (struct fix *fixp)
3467 {
3468 int res = aarch64_force_reloc (fixp->fx_r_type);
3469
3470 if (res == -1)
3471 return generic_force_reloc (fixp);
3472 return res;
3473 }
3474
/* Mode argument to parse_shift and parser_shifter_operand.  Each value
   names the set of shift/extend operators that is syntactically valid
   in the current context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3489
3490 /* Parse a <shift> operator on an AArch64 data processing instruction.
3491 Return TRUE on success; otherwise return FALSE. */
3492 static bool
3493 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3494 {
3495 const struct aarch64_name_value_pair *shift_op;
3496 enum aarch64_modifier_kind kind;
3497 expressionS exp;
3498 int exp_has_prefix;
3499 char *s = *str;
3500 char *p = s;
3501
3502 for (p = *str; ISALPHA (*p); p++)
3503 ;
3504
3505 if (p == *str)
3506 {
3507 set_syntax_error (_("shift expression expected"));
3508 return false;
3509 }
3510
3511 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3512
3513 if (shift_op == NULL)
3514 {
3515 set_syntax_error (_("shift operator expected"));
3516 return false;
3517 }
3518
3519 kind = aarch64_get_operand_modifier (shift_op);
3520
3521 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3522 {
3523 set_syntax_error (_("invalid use of 'MSL'"));
3524 return false;
3525 }
3526
3527 if (kind == AARCH64_MOD_MUL
3528 && mode != SHIFTED_MUL
3529 && mode != SHIFTED_MUL_VL)
3530 {
3531 set_syntax_error (_("invalid use of 'MUL'"));
3532 return false;
3533 }
3534
3535 switch (mode)
3536 {
3537 case SHIFTED_LOGIC_IMM:
3538 if (aarch64_extend_operator_p (kind))
3539 {
3540 set_syntax_error (_("extending shift is not permitted"));
3541 return false;
3542 }
3543 break;
3544
3545 case SHIFTED_ARITH_IMM:
3546 if (kind == AARCH64_MOD_ROR)
3547 {
3548 set_syntax_error (_("'ROR' shift is not permitted"));
3549 return false;
3550 }
3551 break;
3552
3553 case SHIFTED_LSL:
3554 if (kind != AARCH64_MOD_LSL)
3555 {
3556 set_syntax_error (_("only 'LSL' shift is permitted"));
3557 return false;
3558 }
3559 break;
3560
3561 case SHIFTED_MUL:
3562 if (kind != AARCH64_MOD_MUL)
3563 {
3564 set_syntax_error (_("only 'MUL' is permitted"));
3565 return false;
3566 }
3567 break;
3568
3569 case SHIFTED_MUL_VL:
3570 /* "MUL VL" consists of two separate tokens. Require the first
3571 token to be "MUL" and look for a following "VL". */
3572 if (kind == AARCH64_MOD_MUL)
3573 {
3574 skip_whitespace (p);
3575 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3576 {
3577 p += 2;
3578 kind = AARCH64_MOD_MUL_VL;
3579 break;
3580 }
3581 }
3582 set_syntax_error (_("only 'MUL VL' is permitted"));
3583 return false;
3584
3585 case SHIFTED_REG_OFFSET:
3586 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3587 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3588 {
3589 set_fatal_syntax_error
3590 (_("invalid shift for the register offset addressing mode"));
3591 return false;
3592 }
3593 break;
3594
3595 case SHIFTED_LSL_MSL:
3596 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3597 {
3598 set_syntax_error (_("invalid shift operator"));
3599 return false;
3600 }
3601 break;
3602
3603 default:
3604 abort ();
3605 }
3606
3607 /* Whitespace can appear here if the next thing is a bare digit. */
3608 skip_whitespace (p);
3609
3610 /* Parse shift amount. */
3611 exp_has_prefix = 0;
3612 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3613 exp.X_op = O_absent;
3614 else
3615 {
3616 if (is_immediate_prefix (*p))
3617 {
3618 p++;
3619 exp_has_prefix = 1;
3620 }
3621 aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
3622 }
3623 if (kind == AARCH64_MOD_MUL_VL)
3624 /* For consistency, give MUL VL the same shift amount as an implicit
3625 MUL #1. */
3626 operand->shifter.amount = 1;
3627 else if (exp.X_op == O_absent)
3628 {
3629 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3630 {
3631 set_syntax_error (_("missing shift amount"));
3632 return false;
3633 }
3634 operand->shifter.amount = 0;
3635 }
3636 else if (exp.X_op != O_constant)
3637 {
3638 set_syntax_error (_("constant shift amount required"));
3639 return false;
3640 }
3641 /* For parsing purposes, MUL #n has no inherent range. The range
3642 depends on the operand and will be checked by operand-specific
3643 routines. */
3644 else if (kind != AARCH64_MOD_MUL
3645 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3646 {
3647 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3648 return false;
3649 }
3650 else
3651 {
3652 operand->shifter.amount = exp.X_add_number;
3653 operand->shifter.amount_present = 1;
3654 }
3655
3656 operand->shifter.operator_present = 1;
3657 operand->shifter.kind = kind;
3658
3659 *str = p;
3660 return true;
3661 }
3662
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the arithmetic- and logical-immediate forms take an immediate
     shifter operand.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Do not accept any shifter for logical immediate values.
     NOTE(review): if the comma is present but parse_shift fails, this
     condition is false and we fall through to the success return, with
     whatever error parse_shift recorded -- confirm this diagnostic
     behaviour is intended.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3704
3705 /* Parse a <shifter_operand> for a data processing instruction:
3706
3707 <Rm>
3708 <Rm>, <shift>
3709 #<immediate>
3710 #<immediate>, LSL #imm
3711
3712 where <shift> is handled by parse_shift above, and the last two
3713 cases are handled by the function above.
3714
3715 Validation of immediate operands is deferred to md_apply_fix.
3716
3717 Return TRUE on success; otherwise return FALSE. */
3718
3719 static bool
3720 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3721 enum parse_shift_mode mode)
3722 {
3723 const reg_entry *reg;
3724 aarch64_opnd_qualifier_t qualifier;
3725 enum aarch64_operand_class opd_class
3726 = aarch64_get_operand_class (operand->type);
3727
3728 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3729 if (reg)
3730 {
3731 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3732 {
3733 set_syntax_error (_("unexpected register in the immediate operand"));
3734 return false;
3735 }
3736
3737 if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR))
3738 {
3739 set_expected_reg_error (REG_TYPE_R_ZR, reg, 0);
3740 return false;
3741 }
3742
3743 operand->reg.regno = reg->number;
3744 operand->qualifier = qualifier;
3745
3746 /* Accept optional shift operation on register. */
3747 if (! skip_past_comma (str))
3748 return true;
3749
3750 if (! parse_shift (str, operand, mode))
3751 return false;
3752
3753 return true;
3754 }
3755 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3756 {
3757 set_syntax_error
3758 (_("integer register expected in the extended/shifted operand "
3759 "register"));
3760 return false;
3761 }
3762
3763 /* We have a shifted immediate variable. */
3764 return parse_shifter_operand_imm (str, operand, mode);
3765 }
3766
/* Parse the relocation-modifier form of a <shifter_operand>:

      {#}:<reloc_op>:<expr>{, <shift>}

   recording the ADD-variant relocation in inst.reloc.  Falls through
   to parse_shifter_operand when no modifier is present.
   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3828
/* Parse all forms of an address expression.  Information is written
   to *OPERAND and/or inst.reloc.

   The A64 instruction set has the following addressing modes:

   Offset
     [base]			// in SIMD ld/st structure
     [base{,#0}]		// in ld/st exclusive
     [base{,#imm}]
     [base,Xm{,LSL #imm}]
     [base,Xm,SXTX {#imm}]
     [base,Wm,(S|U)XTW {#imm}]
   Pre-indexed
     [base]!			// in ldraa/ldrab exclusive
     [base,#imm]!
   Post-indexed
     [base],#imm
     [base],Xm			// in SIMD ld/st structure
   PC-relative (literal)
     label
   SVE:
     [base,#imm,MUL VL]
     [base,Zm.D{,LSL #imm}]
     [base,Zm.S,(S|U)XTW {#imm}]
     [base,Zm.D,(S|U)XTW {#imm}]	// ignores top 32 bits of Zm.D elements
     [Zn.S,#imm]
     [Zn.D,#imm]
     [Zn.S{, Xm}]
     [Zn.S,Zm.S{,LSL #imm}]	// in ADR
     [Zn.D,Zm.D{,LSL #imm}]	// in ADR
     [Zn.D,Zm.D,(S|U)XTW {#imm}]	// in ADR

   (As a convenience, the notation "=immediate" is permitted in conjunction
   with the pc-relative literal load instructions to automatically place an
   immediate value or symbolic address in a nearby literal pool and generate
   a hidden label which references it.)

   Upon a successful parsing, the address structure in *OPERAND will be
   filled in the following way:

     .base_regno = <base>
     .offset.is_reg	// 1 if the offset is a register
     .offset.imm = <imm>
     .offset.regno = <Rm>

   For different addressing modes defined in the A64 ISA:

   Offset
     .pcrel=0; .preind=1; .postind=0; .writeback=0
   Pre-indexed
     .pcrel=0; .preind=1; .postind=0; .writeback=1
   Post-indexed
     .pcrel=0; .preind=0; .postind=1; .writeback=1
   PC-relative (literal)
     .pcrel=1; .preind=1; .postind=0; .writeback=0

   The shift/extension information, if any, will be stored in .shifter.
   The base and offset qualifiers will be stored in *BASE_QUALIFIER and
   *OFFSET_QUALIFIER respectively, with NIL being used if there's no
   corresponding register.

   BASE_TYPE says which types of base register should be accepted and
   OFFSET_TYPE says the same for offset registers.  IMM_SHIFT_MODE
   is the type of shifter that is allowed for immediate offsets,
   or SHIFTED_NONE if none.

   In all other respects, it is the caller's responsibility to check
   for addressing modes not supported by the instruction, and to set
   inst.reloc.type.  */

static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is the =immediate or label (pc-relative) form.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* ADR uses the adr-variant relocation; everything else here
	     is a load-literal.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Remember whether the base began with a letter, so the diagnostics
     below can distinguish a bad register name from a missing one.  */
  bool alpha_base_p = ISALPHA (*p);
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      if (reg
	  && aarch64_check_reg_type (reg, REG_TYPE_R_SP)
	  && *base_qualifier == AARCH64_OPND_QLF_W)
	set_syntax_error (_("expected a 64-bit base register"));
      else if (alpha_base_p)
	set_syntax_error (_("invalid base register"));
      else
	set_syntax_error (_("expected a base register"));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* Non-extending forms require a 64-bit offset register.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* Treat bare [Rn] as [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
4205
4206 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4207 on success. */
4208 static bool
4209 parse_address (char **str, aarch64_opnd_info *operand)
4210 {
4211 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4212 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4213 REG_TYPE_R64_SP, REG_TYPE_R_ZR, SHIFTED_NONE);
4214 }
4215
/* Parse an address in which SVE vector registers and MUL VL are allowed.
   The arguments have the same meaning as for parse_address_main.
   Return TRUE on success.  */
static bool
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  /* Delegate with SVE base/offset register classes and the MUL VL
     immediate shifter enabled.  */
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
4228
4229 /* Parse a register X0-X30. The register must be 64-bit and register 31
4230 is unallocated. */
4231 static bool
4232 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4233 {
4234 const reg_entry *reg = parse_reg (str);
4235 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4236 {
4237 set_expected_reg_error (REG_TYPE_R_64, reg, 0);
4238 return false;
4239 }
4240 operand->reg.regno = reg->number;
4241 operand->qualifier = AARCH64_OPND_QLF_X;
4242 return true;
4243 }
4244
4245 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
4246 Return TRUE on success; otherwise return FALSE. */
4247 static bool
4248 parse_half (char **str, int *internal_fixup_p)
4249 {
4250 char *p = *str;
4251
4252 skip_past_char (&p, '#');
4253
4254 gas_assert (internal_fixup_p);
4255 *internal_fixup_p = 0;
4256
4257 if (*p == ':')
4258 {
4259 struct reloc_table_entry *entry;
4260
4261 /* Try to parse a relocation. Anything else is an error. */
4262 ++p;
4263
4264 if (!(entry = find_reloc_table_entry (&p)))
4265 {
4266 set_syntax_error (_("unknown relocation modifier"));
4267 return false;
4268 }
4269
4270 if (entry->movw_type == 0)
4271 {
4272 set_syntax_error
4273 (_("this relocation modifier is not allowed on this instruction"));
4274 return false;
4275 }
4276
4277 inst.reloc.type = entry->movw_type;
4278 }
4279 else
4280 *internal_fixup_p = 1;
4281
4282 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4283 return false;
4284
4285 *str = p;
4286 return true;
4287 }
4288
4289 /* Parse an operand for an ADRP instruction:
4290 ADRP <Xd>, <label>
4291 Return TRUE on success; otherwise return FALSE. */
4292
4293 static bool
4294 parse_adrp (char **str)
4295 {
4296 char *p;
4297
4298 p = *str;
4299 if (*p == ':')
4300 {
4301 struct reloc_table_entry *entry;
4302
4303 /* Try to parse a relocation. Anything else is an error. */
4304 ++p;
4305 if (!(entry = find_reloc_table_entry (&p)))
4306 {
4307 set_syntax_error (_("unknown relocation modifier"));
4308 return false;
4309 }
4310
4311 if (entry->adrp_type == 0)
4312 {
4313 set_syntax_error
4314 (_("this relocation modifier is not allowed on this instruction"));
4315 return false;
4316 }
4317
4318 inst.reloc.type = entry->adrp_type;
4319 }
4320 else
4321 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4322
4323 inst.reloc.pc_rel = 1;
4324 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4325 return false;
4326 *str = p;
4327 return true;
4328 }
4329
4330 /* Miscellaneous. */
4331
4332 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4333 of SIZE tokens in which index I gives the token for field value I,
4334 or is null if field value I is invalid. If the symbolic operand
4335 can also be given as a 0-based integer, REG_TYPE says which register
4336 names should be treated as registers rather than as symbolic immediates
4337 while parsing that integer. REG_TYPE is REG_TYPE_MAX otherwise.
4338
4339 Return true on success, moving *STR past the operand and storing the
4340 field value in *VAL. */
4341
4342 static int
4343 parse_enum_string (char **str, int64_t *val, const char *const *array,
4344 size_t size, aarch64_reg_type reg_type)
4345 {
4346 expressionS exp;
4347 char *p, *q;
4348 size_t i;
4349
4350 /* Match C-like tokens. */
4351 p = q = *str;
4352 while (ISALNUM (*q))
4353 q++;
4354
4355 for (i = 0; i < size; ++i)
4356 if (array[i]
4357 && strncasecmp (array[i], p, q - p) == 0
4358 && array[i][q - p] == 0)
4359 {
4360 *val = i;
4361 *str = q;
4362 return true;
4363 }
4364
4365 if (reg_type == REG_TYPE_MAX)
4366 return false;
4367
4368 if (!parse_immediate_expression (&p, &exp, reg_type))
4369 return false;
4370
4371 if (exp.X_op == O_constant
4372 && (uint64_t) exp.X_add_number < size)
4373 {
4374 *val = exp.X_add_number;
4375 *str = p;
4376 return true;
4377 }
4378
4379 /* Use the default error for this operand. */
4380 return false;
4381 }
4382
4383 /* Parse an option for a preload instruction. Returns the encoding for the
4384 option, or PARSE_FAIL. */
4385
4386 static int
4387 parse_pldop (char **str)
4388 {
4389 char *p, *q;
4390 const struct aarch64_name_value_pair *o;
4391
4392 p = q = *str;
4393 while (ISALNUM (*q))
4394 q++;
4395
4396 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4397 if (!o)
4398 return PARSE_FAIL;
4399
4400 *str = q;
4401 return o->value;
4402 }
4403
4404 /* Parse an option for a barrier instruction. Returns the encoding for the
4405 option, or PARSE_FAIL. */
4406
4407 static int
4408 parse_barrier (char **str)
4409 {
4410 char *p, *q;
4411 const struct aarch64_name_value_pair *o;
4412
4413 p = q = *str;
4414 while (ISALPHA (*q))
4415 q++;
4416
4417 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4418 if (!o)
4419 return PARSE_FAIL;
4420
4421 *str = q;
4422 return o->value;
4423 }
4424
4425 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4426 return 0 if successful. Otherwise return PARSE_FAIL. */
4427
static int
parse_barrier_psb (char **str,
		   const struct aarch64_name_value_pair ** hint_opt)
{
  char *p, *q;
  const struct aarch64_name_value_pair *o;

  /* Collect the alphabetic option name following the mnemonic.  */
  p = q = *str;
  while (ISALPHA (*q))
    q++;

  o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
  if (!o)
    {
      set_fatal_syntax_error
	( _("unknown or missing option to PSB/TSB"));
      return PARSE_FAIL;
    }

  /* 0x11 is presumably the hint encoding of CSYNC, the only option that
     PSB/TSB accept -- NOTE(review): confirm against the hint operand
     table and consider a named constant.  */
  if (o->value != 0x11)
    {
      /* PSB only accepts option name 'CSYNC'.  */
      set_syntax_error
	(_("the specified option is not accepted for PSB/TSB"));
      return PARSE_FAIL;
    }

  /* Success: advance past the option and hand the record back.  */
  *str = q;
  *hint_opt = o;
  return 0;
}
4459
4460 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4461 return 0 if successful. Otherwise return PARSE_FAIL. */
4462
static int
parse_bti_operand (char **str,
		   const struct aarch64_name_value_pair ** hint_opt)
{
  char *p, *q;
  const struct aarch64_name_value_pair *o;

  /* Collect the alphabetic operand name.  */
  p = q = *str;
  while (ISALPHA (*q))
    q++;

  /* BTI operands share the generic hint-option table; filter below for
     the subset BTI accepts.  */
  o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
  if (!o)
    {
      set_fatal_syntax_error
	( _("unknown option to BTI"));
      return PARSE_FAIL;
    }

  switch (o->value)
    {
    /* Valid BTI operands.  */
    case HINT_OPD_C:
    case HINT_OPD_J:
    case HINT_OPD_JC:
      break;

    default:
      /* A hint option that exists but is not legal for BTI.  */
      set_syntax_error
	(_("unknown option to BTI"));
      return PARSE_FAIL;
    }

  *str = q;
  *hint_opt = o;
  return 0;
}
4500
4501 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4502 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4503 on failure. Format:
4504
4505 REG_TYPE.QUALIFIER
4506
4507 Side effect: Update STR with current parse position of success.
4508
4509 FLAGS is as for parse_typed_reg. */
4510
4511 static const reg_entry *
4512 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4513 aarch64_opnd_qualifier_t *qualifier, unsigned int flags)
4514 {
4515 struct vector_type_el vectype;
4516 const reg_entry *reg = parse_typed_reg (str, reg_type, &vectype,
4517 PTR_FULL_REG | flags);
4518 if (!reg)
4519 return NULL;
4520
4521 if (vectype.type == NT_invtype)
4522 *qualifier = AARCH64_OPND_QLF_NIL;
4523 else
4524 {
4525 *qualifier = vectype_to_qualifier (&vectype);
4526 if (*qualifier == AARCH64_OPND_QLF_NIL)
4527 return NULL;
4528 }
4529
4530 return reg;
4531 }
4532
4533 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4534
4535 #<imm>
4536 <imm>
4537
4538 Function return TRUE if immediate was found, or FALSE.
4539 */
4540 static bool
4541 parse_sme_immediate (char **str, int64_t *imm)
4542 {
4543 int64_t val;
4544 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4545 return false;
4546
4547 *imm = val;
4548 return true;
4549 }
4550
4551 /* Parse index with selection register and immediate offset:
4552
4553 [<Wv>, <imm>]
4554 [<Wv>, #<imm>]
4555
4556 Return true on success, populating OPND with the parsed index. */
4557
static bool
parse_sme_za_index (char **str, struct aarch64_indexed_za *opnd)
{
  const reg_entry *reg;

  if (!skip_past_char (str, '['))
    {
      set_syntax_error (_("expected '['"));
      return false;
    }

  /* The selection register, encoded in the 2-bit Rv field.  */
  reg = parse_reg (str);
  if (reg == NULL || reg->type != REG_TYPE_R_32)
    {
      set_syntax_error (_("expected a 32-bit selection register"));
      return false;
    }
  opnd->index.regno = reg->number;

  if (!skip_past_char (str, ','))
    {
      set_syntax_error (_("missing immediate offset"));
      return false;
    }

  if (!parse_sme_immediate (str, &opnd->index.imm))
    {
      set_syntax_error (_("expected a constant immediate offset"));
      return false;
    }

  /* Optional "<first>:<last>" offset range; COUNTM1 records the
     distance between the two offsets (i.e. count minus one).  */
  if (skip_past_char (str, ':'))
    {
      int64_t end;
      if (!parse_sme_immediate (str, &end))
	{
	  set_syntax_error (_("expected a constant immediate offset"));
	  return false;
	}
      if (end < opnd->index.imm)
	{
	  set_syntax_error (_("the last offset is less than the"
			      " first offset"));
	  return false;
	}
      if (end == opnd->index.imm)
	{
	  set_syntax_error (_("the last offset is equal to the"
			      " first offset"));
	  return false;
	}
      opnd->index.countm1 = (uint64_t) end - opnd->index.imm;
    }

  /* Optional ", vgx2" or ", vgx4" vector-group-size suffix; the
     !ISALPHA check rejects longer identifiers beginning with "vgx2/4".  */
  opnd->group_size = 0;
  if (skip_past_char (str, ','))
    {
      if (strncasecmp (*str, "vgx2", 4) == 0 && !ISALPHA ((*str)[4]))
	{
	  *str += 4;
	  opnd->group_size = 2;
	}
      else if (strncasecmp (*str, "vgx4", 4) == 0 && !ISALPHA ((*str)[4]))
	{
	  *str += 4;
	  opnd->group_size = 4;
	}
      else
	{
	  set_syntax_error (_("invalid vector group size"));
	  return false;
	}
    }

  if (!skip_past_char (str, ']'))
    {
      set_syntax_error (_("expected ']'"));
      return false;
    }

  return true;
}
4641
4642 /* Parse a register of type REG_TYPE that might have an element type
4643 qualifier and that is indexed by two values: a 32-bit register,
4644 followed by an immediate. The ranges of the register and the
4645 immediate vary by opcode and are checked in libopcodes.
4646
4647 Return true on success, populating OPND with information about
4648 the operand and setting QUALIFIER to the register qualifier.
4649
4650 Field format examples:
4651
4652 <Pm>.<T>[<Wv>< #<imm>]
4653 ZA[<Wv>, #<imm>]
4654 <ZAn><HV>.<T>[<Wv>, #<imm>]
4655
4656 FLAGS is as for parse_typed_reg. */
4657
4658 static bool
4659 parse_dual_indexed_reg (char **str, aarch64_reg_type reg_type,
4660 struct aarch64_indexed_za *opnd,
4661 aarch64_opnd_qualifier_t *qualifier,
4662 unsigned int flags)
4663 {
4664 const reg_entry *reg = parse_reg_with_qual (str, reg_type, qualifier, flags);
4665 if (!reg)
4666 return false;
4667
4668 opnd->v = aarch64_check_reg_type (reg, REG_TYPE_ZATV);
4669 opnd->regno = reg->number;
4670
4671 return parse_sme_za_index (str, opnd);
4672 }
4673
4674 /* Like parse_sme_za_hv_tiles_operand, but expect braces around the
4675 operand. */
4676
static bool
parse_sme_za_hv_tiles_operand_with_braces (char **str,
					   struct aarch64_indexed_za *opnd,
					   aarch64_opnd_qualifier_t *qualifier)
{
  /* The operand must be wrapped in { }.  On a missing '{', parse the
     register anyway purely to produce a good diagnostic.  */
  if (!skip_past_char (str, '{'))
    {
      set_expected_reglist_error (REG_TYPE_ZATHV, parse_reg (str));
      return false;
    }

  if (!parse_dual_indexed_reg (str, REG_TYPE_ZATHV, opnd, qualifier,
			       PTR_IN_REGLIST))
    return false;

  if (!skip_past_char (str, '}'))
    {
      set_syntax_error (_("expected '}'"));
      return false;
    }

  return true;
}
4700
4701 /* Parse list of up to eight 64-bit element tile names separated by commas in
4702 SME's ZERO instruction:
4703
4704 ZERO { <mask> }
4705
4706 Function returns <mask>:
4707
4708 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4709 */
4710 static int
4711 parse_sme_zero_mask(char **str)
4712 {
4713 char *q;
4714 int mask;
4715 aarch64_opnd_qualifier_t qualifier;
4716 unsigned int ptr_flags = PTR_IN_REGLIST;
4717
4718 mask = 0x00;
4719 q = *str;
4720 do
4721 {
4722 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA_ZAT,
4723 &qualifier, ptr_flags);
4724 if (!reg)
4725 return PARSE_FAIL;
4726
4727 if (reg->type == REG_TYPE_ZA)
4728 {
4729 if (qualifier != AARCH64_OPND_QLF_NIL)
4730 {
4731 set_syntax_error ("ZA should not have a size suffix");
4732 return PARSE_FAIL;
4733 }
4734 /* { ZA } is assembled as all-ones immediate. */
4735 mask = 0xff;
4736 }
4737 else
4738 {
4739 int regno = reg->number;
4740 if (qualifier == AARCH64_OPND_QLF_S_B)
4741 {
4742 /* { ZA0.B } is assembled as all-ones immediate. */
4743 mask = 0xff;
4744 }
4745 else if (qualifier == AARCH64_OPND_QLF_S_H)
4746 mask |= 0x55 << regno;
4747 else if (qualifier == AARCH64_OPND_QLF_S_S)
4748 mask |= 0x11 << regno;
4749 else if (qualifier == AARCH64_OPND_QLF_S_D)
4750 mask |= 0x01 << regno;
4751 else if (qualifier == AARCH64_OPND_QLF_S_Q)
4752 {
4753 set_syntax_error (_("ZA tile masks do not operate at .Q"
4754 " granularity"));
4755 return PARSE_FAIL;
4756 }
4757 else if (qualifier == AARCH64_OPND_QLF_NIL)
4758 {
4759 set_syntax_error (_("missing ZA tile size"));
4760 return PARSE_FAIL;
4761 }
4762 else
4763 {
4764 set_syntax_error (_("invalid ZA tile"));
4765 return PARSE_FAIL;
4766 }
4767 }
4768 ptr_flags |= PTR_GOOD_MATCH;
4769 }
4770 while (skip_past_char (&q, ','));
4771
4772 *str = q;
4773 return mask;
4774 }
4775
4776 /* Wraps in curly braces <mask> operand ZERO instruction:
4777
4778 ZERO { <mask> }
4779
4780 Function returns value of <mask> bit-field.
4781 */
4782 static int
4783 parse_sme_list_of_64bit_tiles (char **str)
4784 {
4785 int regno;
4786
4787 if (!skip_past_char (str, '{'))
4788 {
4789 set_syntax_error (_("expected '{'"));
4790 return PARSE_FAIL;
4791 }
4792
4793 /* Empty <mask> list is an all-zeros immediate. */
4794 if (!skip_past_char (str, '}'))
4795 {
4796 regno = parse_sme_zero_mask (str);
4797 if (regno == PARSE_FAIL)
4798 return PARSE_FAIL;
4799
4800 if (!skip_past_char (str, '}'))
4801 {
4802 set_syntax_error (_("expected '}'"));
4803 return PARSE_FAIL;
4804 }
4805 }
4806 else
4807 regno = 0x00;
4808
4809 return regno;
4810 }
4811
/* Parse streaming mode operand for SMSTART and SMSTOP.

   {SM | ZA}

   Function returns 's' if SM or 'z' if ZA is parsed.  Otherwise PARSE_FAIL.
*/
static int
parse_sme_sm_za (char **str)
{
  char *p, *q;

  /* Collect the alphabetic operand name.  */
  p = q = *str;
  while (ISALPHA (*q))
    q++;

  if ((q - p != 2)
      || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
    {
      set_syntax_error (_("expected SM or ZA operand"));
      return PARSE_FAIL;
    }

  /* The first letter ('s' or 'z') is enough to distinguish the two.  */
  *str = q;
  return TOLOWER (p[0]);
}
4837
4838 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4839 Returns the encoding for the option, or PARSE_FAIL.
4840
4841 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4842 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4843
4844 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4845 field, otherwise as a system register.
4846 */
4847
static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, truncating at the
     buffer size; the length check below catches overlong names.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Range-check each field of the encoding.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the 16-bit system register encoding.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: diagnose uses not supported by the selected
	 processor, and warn about deprecated names, but still accept
	 the encoding.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4911
4912 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4913 for the option, or NULL. */
4914
static const aarch64_sys_ins_reg *
parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_ins_reg *o;

  /* Copy a lower-cased version of the name into BUF, truncating at the
     buffer size; the length check below catches overlong names.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return NULL;

  o = str_hash_find (sys_ins_regs, buf);
  if (!o)
    return NULL;

  /* Diagnose but still accept registers the selected processor lacks,
     and warn about deprecated names.  */
  if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
					o->name, o->value, o->flags, 0))
    as_bad (_("selected processor does not support system register "
	      "name '%s'"), buf);
  if (aarch64_sys_reg_deprecated_p (o->flags))
    as_warn (_("system register name '%s' is deprecated and may be "
	       "removed in a future release"), buf);

  *str = q;
  return o;
}
4949 \f
/* Helper macros for the operand-parsing loop in md_assemble; each one
   consumes part of `str' and branches to the shared `failure' label on a
   parse error.  */

/* Consume the literal character CHR, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of REGTYPE into REG, or fail.  */
#define po_reg_or_fail(regtype) do {				\
    reg = aarch64_reg_parse (&str, regtype, NULL);		\
    if (!reg)							\
      goto failure;						\
  } while (0)

/* Parse an integer or FP register of REG_TYPE, filling in the current
   operand's register number and inherent qualifier, or fail.  */
#define po_int_fp_reg_or_fail(reg_type) do {			\
    reg = parse_reg (&str);					\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_expected_reg_error (reg_type, reg, 0);		\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = inherent_reg_qualifier (reg);		\
  } while (0)

/* Parse a constant immediate into VAL with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or fail.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a named enumeration value from ARRAY into VAL, also accepting
   an immediate index, or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* As po_enum_or_fail, but accept only the symbolic names (REG_TYPE_MAX
   suppresses the immediate fallback in parse_enum_string), or fail.  */
#define po_strict_enum_or_fail(array) do {			\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), REG_TYPE_MAX))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; fail if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
5004 \f
/* A primitive log calculator: return floor(log2(N)) for N > 1, and 0
   for N <= 1.  */

static inline unsigned int
get_log2 (unsigned int n)
{
  unsigned int log = 0;

  for (; n > 1; n >>= 1)
    log++;

  return log;
}
5018
/* Encode the 12-bit imm field of an add/sub-immediate instruction; the
   field starts at bit 10.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  return imm << 10;
}
5025
/* Encode the shift amount field of an add/sub-immediate instruction;
   the field starts at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  return cnt << 22;
}
5032
5033
/* Encode the imm field of an ADR instruction: the low two bits (immlo)
   go to bits [30:29], the remaining 19 bits (immhi) to bits [23:5].  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;		   /* [1:0].  */
  uint32_t immhi = imm & (0x7ffff << 2);   /* [20:2].  */

  return (immlo << 29) | (immhi << 3);
}
5041
/* Encode the 16-bit immediate field of a move-wide-immediate
   instruction; the field starts at bit 5.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  return imm << 5;
}
5048
/* Encode the 26-bit offset of an unconditional branch; the offset
   occupies bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x03ffffff;
}
5055
/* Encode the 19-bit offset of a conditional branch or compare & branch;
   the field occupies bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
5062
/* Encode the 19-bit offset of a load-literal instruction; the field
   occupies bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
5069
/* Encode the 14-bit offset of a test & branch instruction; the field
   occupies bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fff) << 5;
}
5076
/* Encode the 16-bit imm field of SVC/HVC/SMC; the field starts at
   bit 5.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  return imm << 5;
}
5083
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling bit 30
   (the op bit) of the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ ((uint32_t) 1 << 30);
}
5090
/* Force the MOVZ form by setting bit 30 of the opcode.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | ((uint32_t) 1 << 30);
}
5096
/* Force the MOVN form by clearing bit 30 of the opcode.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~((uint32_t) 1 << 30);
}
5102
5103 /* Overall per-instruction processing. */
5104
5105 /* We need to be able to fix up arbitrary expressions in some statements.
5106 This is so that we can handle symbols that are an arbitrary distance from
5107 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
5108 which returns part of an address in a form which will be valid for
5109 a data instruction. We do this by pushing the expression into a symbol
5110 in the expr_section, and creating a fix for that. */
5111
static fixS *
fix_new_aarch64 (fragS * frag,
		 int where,
		 short int size,
		 expressionS * exp,
		 int pc_rel,
		 int reloc)
{
  fixS *new_fix;

  switch (exp->X_op)
    {
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      /* Forms the fixup machinery can resolve directly.  */
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
      break;

    default:
      /* Arbitrary expression: push it into a symbol in the expr
	 section and create the fix against that symbol.  */
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
			 pc_rel, reloc);
      break;
    }
  return new_fix;
}
5138 \f
5139 /* Diagnostics on operands errors. */
5140
/* By default, output a verbose error message.
   Verbose error messages are disabled by -mno-verbose-error.  */
static int verbose_error_p = 1;
5144
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The array is indexed
   directly by enum aarch64_operand_error_kind (see the DEBUG_TRACE use
   in add_operand_error_record), so entries must stay in the same order
   as that enum.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_INVALID_VG_SIZE",
  "AARCH64_OPDE_REG_LIST_LENGTH",
  "AARCH64_OPDE_REG_LIST_STRIDE",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_OTHER_ERROR",
  "AARCH64_OPDE_INVALID_REGNO",
};
#endif /* DEBUG_AARCH64 */
5167
5168 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
5169
5170 When multiple errors of different kinds are found in the same assembly
5171 line, only the error of the highest severity will be picked up for
5172 issuing the diagnostics. */
5173
static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enumerators being declared in
     increasing order of severity; these asserts document and enforce
     that assumed ordering.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VG_SIZE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_REG_LIST_LENGTH > AARCH64_OPDE_INVALID_VG_SIZE);
  gas_assert (AARCH64_OPDE_REG_LIST_STRIDE > AARCH64_OPDE_REG_LIST_LENGTH);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_REG_LIST_STRIDE);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST_STRIDE);
  gas_assert (AARCH64_OPDE_INVALID_REGNO > AARCH64_OPDE_OTHER_ERROR);
  return lhs > rhs;
}
5194
5195 /* Helper routine to get the mnemonic name from the assembly instruction
5196 line; should only be called for the diagnosis purpose, as there is
5197 string copy operation involved, which may affect the runtime
5198 performance if used in elsewhere. */
5199
static const char*
get_mnemonic_name (const char *str)
{
  /* NOTE: returns a pointer to this static buffer, so the result is
     only valid until the next call; for diagnostics only.  */
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5223
5224 static void
5225 reset_aarch64_instruction (aarch64_instruction *instruction)
5226 {
5227 memset (instruction, '\0', sizeof (aarch64_instruction));
5228 instruction->reloc.type = BFD_RELOC_UNUSED;
5229 }
5230
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error, tied to the opcode template it was found
   against.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;	/* Template the error was found against.  */
  aarch64_operand_error detail;	/* What went wrong, and where.  */
  struct operand_error_record *next;	/* Singly-linked list link.  */
};

typedef struct operand_error_record operand_error_record;

/* Head/tail of a list of operand_error_record.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines to avoid repeated
   allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5262
5263 /* Initialize the data structure that stores the operand mismatch
5264 information on assembling one line of the assembly code. */
static void
init_operand_error_report (void)
{
  /* Recycle any records left from the previous line: splice the whole
     list onto the free list instead of freeing the nodes.  */
  if (operand_error_report.head != NULL)
    {
      gas_assert (operand_error_report.tail != NULL);
      operand_error_report.tail->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = operand_error_report.head;
      operand_error_report.head = NULL;
      operand_error_report.tail = NULL;
      return;
    }
  gas_assert (operand_error_report.tail == NULL);
}
5279
5280 /* Return TRUE if some operand error has been recorded during the
5281 parsing of the current assembly line using the opcode *OPCODE;
5282 otherwise return FALSE. */
5283 static inline bool
5284 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5285 {
5286 operand_error_record *record = operand_error_report.head;
5287 return record && record->opcode == opcode;
5288 }
5289
5290 /* Add the error record *NEW_RECORD to operand_error_report. The record's
5291 OPCODE field is initialized with OPCODE.
5292 N.B. only one record for each opcode, i.e. the maximum of one error is
5293 recorded for each instruction template. */
5294
static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record: reuse a node from the free list when
	 possible, otherwise allocate a new one.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Overwrite the (possibly fresh) head record with the new detail.  */
  record->detail = new_record->detail;
}
5341
5342 static inline void
5343 record_operand_error_info (const aarch64_opcode *opcode,
5344 aarch64_operand_error *error_info)
5345 {
5346 operand_error_record record;
5347 record.opcode = opcode;
5348 record.detail = *error_info;
5349 add_operand_error_record (&record);
5350 }
5351
5352 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5353 error message *ERROR, for operand IDX (count from 0). */
5354
5355 static void
5356 record_operand_error (const aarch64_opcode *opcode, int idx,
5357 enum aarch64_operand_error_kind kind,
5358 const char* error)
5359 {
5360 aarch64_operand_error info;
5361 memset(&info, 0, sizeof (info));
5362 info.index = idx;
5363 info.kind = kind;
5364 info.error = error;
5365 info.non_fatal = false;
5366 record_operand_error_info (opcode, &info);
5367 }
5368
5369 static void
5370 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5371 enum aarch64_operand_error_kind kind,
5372 const char* error, const int *extra_data)
5373 {
5374 aarch64_operand_error info;
5375 info.index = idx;
5376 info.kind = kind;
5377 info.error = error;
5378 info.data[0].i = extra_data[0];
5379 info.data[1].i = extra_data[1];
5380 info.data[2].i = extra_data[2];
5381 info.non_fatal = false;
5382 record_operand_error_info (opcode, &info);
5383 }
5384
5385 static void
5386 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5387 const char* error, int lower_bound,
5388 int upper_bound)
5389 {
5390 int data[3] = {lower_bound, upper_bound, 0};
5391 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5392 error, data);
5393 }
5394
5395 /* Remove the operand error record for *OPCODE. */
static void ATTRIBUTE_UNUSED
remove_operand_error_record (const aarch64_opcode *opcode)
{
  if (opcode_has_operand_error_p (opcode))
    {
      /* The record for OPCODE, when present, is always the list head.  */
      operand_error_record* record = operand_error_report.head;
      gas_assert (record != NULL && operand_error_report.tail != NULL);
      operand_error_report.head = record->next;
      /* Return the node to the free list for reuse.  */
      record->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = record;
      if (operand_error_report.head == NULL)
	{
	  gas_assert (operand_error_report.tail == record);
	  operand_error_report.tail = NULL;
	}
    }
}
5413
5414 /* Given the instruction in *INSTR, return the index of the best matched
5415 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5416
5417 Return -1 if there is no qualifier sequence; return the first match
5418 if there is multiple matches found. */
5419
static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.
	 An empty sequence marks the end of the populated entries.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers in this sequence agree with
	 the instruction's operands.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Strictly-greater comparison keeps the first of equally good
	 matches.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5463
5464 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5465 corresponding operands in *INSTR. */
5466
5467 static inline void
5468 assign_qualifier_sequence (aarch64_inst *instr,
5469 const aarch64_opnd_qualifier_t *qualifiers)
5470 {
5471 int i = 0;
5472 int num_opnds = aarch64_num_of_operands (instr->opcode);
5473 gas_assert (num_opnds);
5474 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5475 instr->operands[i].qualifier = *qualifiers;
5476 }
5477
5478 /* Callback used by aarch64_print_operand to apply STYLE to the
5479 disassembler output created from FMT and ARGS. The STYLER object holds
5480 any required state. Must return a pointer to a string (created from FMT
5481 and ARGS) that will continue to be valid until the complete disassembled
5482 instruction has been printed.
5483
5484 We don't currently add any styling to the output of the disassembler as
5485 used within assembler error messages, and so STYLE is ignored here. A
5486 new string is allocated on the obstack help within STYLER and returned
5487 to the caller. */
5488
static const char *aarch64_apply_style
  (struct aarch64_styler *styler,
   enum disassembler_style style ATTRIBUTE_UNUSED,
   const char *fmt, va_list args)
{
  int res;
  char *ptr;
  struct obstack *stack = (struct obstack *) styler->state;
  va_list ap;

  /* Calculate the required space.  A va_list can only be traversed
     once, so work on a copy and keep ARGS for the second pass.  */
  va_copy (ap, args);
  res = vsnprintf (NULL, 0, fmt, ap);
  va_end (ap);
  gas_assert (res >= 0);

  /* Allocate space on the obstack and format the result.  The obstack
     storage keeps the string alive until the caller releases it.  */
  ptr = (char *) obstack_alloc (stack, res + 1);
  res = vsnprintf (ptr, (res + 1), fmt, args);
  gas_assert (res >= 0);

  return ptr;
}
5512
5513 /* Print operands for the diagnosis purpose. */
5514
static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Styled output is accumulated on the obstack; we drop the styling
     itself (see aarch64_apply_style).  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  /* Release every string aarch64_apply_style allocated.  */
  obstack_free (&content, NULL);
}
5564
/* Emit an informational (non-error) message on stderr, prefixed with
   the current file name and line number when they are known.  FORMAT
   and the trailing arguments are printf-style; a newline is appended
   automatically.  */

static void
output_info (const char *format, ...)
{
  unsigned int lineno;
  const char *fname = as_where (&lineno);
  va_list ap;

  if (fname != NULL)
    {
      if (lineno != 0)
	fprintf (stderr, "%s:%u: ", fname, lineno);
      else
	fprintf (stderr, "%s: ", fname);
    }
  fprintf (stderr, _("Info: "));
  va_start (ap, format);
  vfprintf (stderr, format, ap);
  va_end (ap);
  (void) putc ('\n', stderr);
}
5588
/* See if the AARCH64_OPDE_SYNTAX_ERROR error described by DETAIL
   relates to registers or register lists.  If so, return a string that
   reports the error against "operand %d", otherwise return null.

   NOTE(review): from the uses below, detail->data[] appears to hold
   SEF_* flag masks -- data[0] for register types expected outside a
   list, data[1] for list-context expectations (SEF_IN_REGLIST bit),
   and data[2] for the register type actually found; confirm against
   the error producers before relying on this.  */

static const char *
get_reg_error_message (const aarch64_operand_error *detail)
{
  /* Handle the case where we found a register that was expected
     to be in a register list outside of a register list.  */
  if ((detail->data[1].i & detail->data[2].i) != 0
      && (detail->data[1].i & SEF_IN_REGLIST) == 0)
    return _("missing braces at operand %d");

  /* If some opcodes expected a register, and we found a register,
     complain about the difference.  */
  if (detail->data[2].i)
    {
      unsigned int expected = (detail->data[1].i & SEF_IN_REGLIST
			       ? detail->data[1].i & ~SEF_IN_REGLIST
			       : detail->data[0].i & ~SEF_DEFAULT_ERROR);
      const char *msg = get_reg_expected_msg (expected, detail->data[2].i);
      if (!msg)
	msg = N_("unexpected register type at operand %d");
      return msg;
    }

  /* Handle the case where we got to the point of trying to parse a
     register within a register list, but didn't find a known register.  */
  if (detail->data[1].i & SEF_IN_REGLIST)
    {
      unsigned int expected = detail->data[1].i & ~SEF_IN_REGLIST;
      const char *msg = get_reg_expected_msg (expected, 0);
      if (!msg)
	msg = _("invalid register list at operand %d");
      return msg;
    }

  /* Punt if register-related problems weren't the only errors.  */
  if (detail->data[0].i & SEF_DEFAULT_ERROR)
    return NULL;

  /* Handle the case where the only acceptable things are registers.  */
  if (detail->data[1].i == 0)
    {
      const char *msg = get_reg_expected_msg (detail->data[0].i, 0);
      if (!msg)
	msg = _("expected a register at operand %d");
      return msg;
    }

  /* Handle the case where the only acceptable things are register lists,
     and there was no opening '{'.  */
  if (detail->data[0].i == 0)
    return _("expected '{' at operand %d");

  return _("expected a register or register list at operand %d");
}
5646
5647 /* Output one operand error record. */
5648
5649 static void
5650 output_operand_error_record (const operand_error_record *record, char *str)
5651 {
5652 const aarch64_operand_error *detail = &record->detail;
5653 int idx = detail->index;
5654 const aarch64_opcode *opcode = record->opcode;
5655 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
5656 : AARCH64_OPND_NIL);
5657
5658 typedef void (*handler_t)(const char *format, ...);
5659 handler_t handler = detail->non_fatal ? as_warn : as_bad;
5660 const char *msg = detail->error;
5661
5662 switch (detail->kind)
5663 {
5664 case AARCH64_OPDE_NIL:
5665 gas_assert (0);
5666 break;
5667
5668 case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
5669 handler (_("this `%s' should have an immediately preceding `%s'"
5670 " -- `%s'"),
5671 detail->data[0].s, detail->data[1].s, str);
5672 break;
5673
5674 case AARCH64_OPDE_EXPECTED_A_AFTER_B:
5675 handler (_("the preceding `%s' should be followed by `%s` rather"
5676 " than `%s` -- `%s'"),
5677 detail->data[1].s, detail->data[0].s, opcode->name, str);
5678 break;
5679
5680 case AARCH64_OPDE_SYNTAX_ERROR:
5681 if (!msg && idx >= 0)
5682 {
5683 msg = get_reg_error_message (detail);
5684 if (msg)
5685 {
5686 char *full_msg = xasprintf (msg, idx + 1);
5687 handler (_("%s -- `%s'"), full_msg, str);
5688 free (full_msg);
5689 break;
5690 }
5691 }
5692 /* Fall through. */
5693
5694 case AARCH64_OPDE_RECOVERABLE:
5695 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
5696 case AARCH64_OPDE_OTHER_ERROR:
5697 /* Use the prepared error message if there is, otherwise use the
5698 operand description string to describe the error. */
5699 if (msg != NULL)
5700 {
5701 if (idx < 0)
5702 handler (_("%s -- `%s'"), msg, str);
5703 else
5704 handler (_("%s at operand %d -- `%s'"),
5705 msg, idx + 1, str);
5706 }
5707 else
5708 {
5709 gas_assert (idx >= 0);
5710 handler (_("operand %d must be %s -- `%s'"), idx + 1,
5711 aarch64_get_operand_desc (opd_code), str);
5712 }
5713 break;
5714
5715 case AARCH64_OPDE_INVALID_VARIANT:
5716 handler (_("operand mismatch -- `%s'"), str);
5717 if (verbose_error_p)
5718 {
5719 /* We will try to correct the erroneous instruction and also provide
5720 more information e.g. all other valid variants.
5721
5722 The string representation of the corrected instruction and other
5723 valid variants are generated by
5724
5725 1) obtaining the intermediate representation of the erroneous
5726 instruction;
5727 2) manipulating the IR, e.g. replacing the operand qualifier;
5728 3) printing out the instruction by calling the printer functions
5729 shared with the disassembler.
5730
5731 The limitation of this method is that the exact input assembly
5732 line cannot be accurately reproduced in some cases, for example an
5733 optional operand present in the actual assembly line will be
5734 omitted in the output; likewise for the optional syntax rules,
5735 e.g. the # before the immediate. Another limitation is that the
5736 assembly symbols and relocation operations in the assembly line
5737 currently cannot be printed out in the error report. Last but not
5738 least, when there is other error(s) co-exist with this error, the
5739 'corrected' instruction may be still incorrect, e.g. given
5740 'ldnp h0,h1,[x0,#6]!'
5741 this diagnosis will provide the version:
5742 'ldnp s0,s1,[x0,#6]!'
5743 which is still not right. */
5744 size_t len = strlen (get_mnemonic_name (str));
5745 int i, qlf_idx;
5746 bool result;
5747 char buf[2048];
5748 aarch64_inst *inst_base = &inst.base;
5749 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
5750
5751 /* Init inst. */
5752 reset_aarch64_instruction (&inst);
5753 inst_base->opcode = opcode;
5754
5755 /* Reset the error report so that there is no side effect on the
5756 following operand parsing. */
5757 init_operand_error_report ();
5758
5759 /* Fill inst. */
5760 result = parse_operands (str + len, opcode)
5761 && programmer_friendly_fixup (&inst);
5762 gas_assert (result);
5763 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
5764 NULL, NULL, insn_sequence);
5765 gas_assert (!result);
5766
5767 /* Find the most matched qualifier sequence. */
5768 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
5769 gas_assert (qlf_idx > -1);
5770
5771 /* Assign the qualifiers. */
5772 assign_qualifier_sequence (inst_base,
5773 opcode->qualifiers_list[qlf_idx]);
5774
5775 /* Print the hint. */
5776 output_info (_(" did you mean this?"));
5777 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
5778 print_operands (buf, opcode, inst_base->operands);
5779 output_info (_(" %s"), buf);
5780
5781 /* Print out other variant(s) if there is any. */
5782 if (qlf_idx != 0 ||
5783 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
5784 output_info (_(" other valid variant(s):"));
5785
5786 /* For each pattern. */
5787 qualifiers_list = opcode->qualifiers_list;
5788 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5789 {
5790 /* Most opcodes has much fewer patterns in the list.
5791 First NIL qualifier indicates the end in the list. */
5792 if (empty_qualifier_sequence_p (*qualifiers_list))
5793 break;
5794
5795 if (i != qlf_idx)
5796 {
5797 /* Mnemonics name. */
5798 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
5799
5800 /* Assign the qualifiers. */
5801 assign_qualifier_sequence (inst_base, *qualifiers_list);
5802
5803 /* Print instruction. */
5804 print_operands (buf, opcode, inst_base->operands);
5805
5806 output_info (_(" %s"), buf);
5807 }
5808 }
5809 }
5810 break;
5811
5812 case AARCH64_OPDE_UNTIED_IMMS:
5813 handler (_("operand %d must have the same immediate value "
5814 "as operand 1 -- `%s'"),
5815 detail->index + 1, str);
5816 break;
5817
5818 case AARCH64_OPDE_UNTIED_OPERAND:
5819 handler (_("operand %d must be the same register as operand 1 -- `%s'"),
5820 detail->index + 1, str);
5821 break;
5822
5823 case AARCH64_OPDE_INVALID_REGNO:
5824 handler (_("%s%d-%s%d expected at operand %d -- `%s'"),
5825 detail->data[0].s, detail->data[1].i,
5826 detail->data[0].s, detail->data[2].i, idx + 1, str);
5827 break;
5828
5829 case AARCH64_OPDE_OUT_OF_RANGE:
5830 if (detail->data[0].i != detail->data[1].i)
5831 handler (_("%s out of range %d to %d at operand %d -- `%s'"),
5832 msg ? msg : _("immediate value"),
5833 detail->data[0].i, detail->data[1].i, idx + 1, str);
5834 else
5835 handler (_("%s must be %d at operand %d -- `%s'"),
5836 msg ? msg : _("immediate value"),
5837 detail->data[0].i, idx + 1, str);
5838 break;
5839
5840 case AARCH64_OPDE_INVALID_VG_SIZE:
5841 if (detail->data[0].i == 0)
5842 handler (_("unexpected vector group size at operand %d -- `%s'"),
5843 idx + 1, str);
5844 else
5845 handler (_("operand %d must have a vector group size of %d -- `%s'"),
5846 idx + 1, detail->data[0].i, str);
5847 break;
5848
5849 case AARCH64_OPDE_REG_LIST_LENGTH:
5850 if (detail->data[0].i == (1 << 1))
5851 handler (_("expected a single-register list at operand %d -- `%s'"),
5852 idx + 1, str);
5853 else if ((detail->data[0].i & -detail->data[0].i) == detail->data[0].i)
5854 handler (_("expected a list of %d registers at operand %d -- `%s'"),
5855 get_log2 (detail->data[0].i), idx + 1, str);
5856 else if (detail->data[0].i == 0x14)
5857 handler (_("expected a list of %d or %d registers at"
5858 " operand %d -- `%s'"),
5859 2, 4, idx + 1, str);
5860 else
5861 handler (_("invalid number of registers in the list"
5862 " at operand %d -- `%s'"), idx + 1, str);
5863 break;
5864
5865 case AARCH64_OPDE_REG_LIST_STRIDE:
5866 if (detail->data[0].i == (1 << 1))
5867 handler (_("the register list must have a stride of %d"
5868 " at operand %d -- `%s'"), 1, idx + 1, str);
5869 else if (detail->data[0].i == 0x12 || detail->data[0].i == 0x102)
5870 handler (_("the register list must have a stride of %d or %d"
5871 " at operand %d -- `%s`"), 1,
5872 detail->data[0].i == 0x12 ? 4 : 8, idx + 1, str);
5873 else
5874 handler (_("invalid register stride at operand %d -- `%s'"),
5875 idx + 1, str);
5876 break;
5877
5878 case AARCH64_OPDE_UNALIGNED:
5879 handler (_("immediate value must be a multiple of "
5880 "%d at operand %d -- `%s'"),
5881 detail->data[0].i, idx + 1, str);
5882 break;
5883
5884 default:
5885 gas_assert (0);
5886 break;
5887 }
5888 }
5889
5890 /* Return true if the presence of error A against an instruction means
5891 that error B should not be reported. This is only used as a first pass,
5892 to pick the kind of error that we should report. */
5893
5894 static bool
5895 better_error_p (operand_error_record *a, operand_error_record *b)
5896 {
5897 /* For errors reported during parsing, prefer errors that relate to
5898 later operands, since that implies that the earlier operands were
5899 syntactically valid.
5900
5901 For example, if we see a register R instead of an immediate in
5902 operand N, we'll report that as a recoverable "immediate operand
5903 required" error. This is because there is often another opcode
5904 entry that accepts a register operand N, and any errors about R
5905 should be reported against the register forms of the instruction.
5906 But if no such register form exists, the recoverable error should
5907 still win over a syntax error against operand N-1.
5908
5909 For these purposes, count an error reported at the end of the
5910 assembly string as equivalent to an error reported against the
5911 final operand. This means that opcode entries that expect more
5912 operands win over "unexpected characters following instruction". */
5913 if (a->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR
5914 && b->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR)
5915 {
5916 int a_index = (a->detail.index < 0
5917 ? aarch64_num_of_operands (a->opcode) - 1
5918 : a->detail.index);
5919 int b_index = (b->detail.index < 0
5920 ? aarch64_num_of_operands (b->opcode) - 1
5921 : b->detail.index);
5922 if (a_index != b_index)
5923 return a_index > b_index;
5924 }
5925 return operand_error_higher_severity_p (a->detail.kind, b->detail.kind);
5926 }
5927
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.

   STR is the assembly line; NON_FATAL_ONLY selects the filtering
   described above.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      if (curr->detail.kind == AARCH64_OPDE_SYNTAX_ERROR)
	{
	  DEBUG_TRACE ("\t%s [%x, %x, %x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i, curr->detail.data[1].i,
		       curr->detail.data[2].i);
	}
      else if (curr->detail.kind == AARCH64_OPDE_REG_LIST_LENGTH
	       || curr->detail.kind == AARCH64_OPDE_REG_LIST_STRIDE)
	{
	  DEBUG_TRACE ("\t%s [%x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i);
	}
      else
	{
	  DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
	}
      if ((!non_fatal_only || curr->detail.non_fatal)
	  && (!record || better_error_p (curr, record)))
	record = curr;
    }

  kind = (record ? record->detail.kind : AARCH64_OPDE_NIL);
  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (!record || curr->detail.index > record->detail.index)
	record = curr;
      else if (curr->detail.index == record->detail.index
	       && !record->detail.error)
	{
	  if (curr->detail.error)
	    record = curr;
	  else if (kind == AARCH64_OPDE_SYNTAX_ERROR)
	    {
	      /* Merge the SEF_* flag sets from equally-ranked syntax
		 errors so get_reg_error_message sees the union of all
		 expectations.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      record->detail.data[1].i |= curr->detail.data[1].i;
	      record->detail.data[2].i |= curr->detail.data[2].i;
	      DEBUG_TRACE ("\t--> %s [%x, %x, %x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i, curr->detail.data[1].i,
			   curr->detail.data[2].i);
	    }
	  else if (kind == AARCH64_OPDE_REG_LIST_LENGTH
		   || kind == AARCH64_OPDE_REG_LIST_STRIDE)
	    {
	      /* Likewise, merge the acceptable length/stride masks.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      DEBUG_TRACE ("\t--> %s [%x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i);
	    }
	  /* Pick the variant with the closest match.  */
	  else if (kind == AARCH64_OPDE_INVALID_VARIANT
		   && record->detail.data[0].i > curr->detail.data[0].i)
	    record = curr;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (record);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
6063 \f
/* Store the 32-bit instruction word INSN at BUF, always in little-endian
   byte order regardless of host endianness.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
6074
/* Read back a 32-bit instruction word from BUF, which holds it in
   little-endian byte order (the inverse of put_aarch64_insn).  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
6084
/* Emit the already-encoded instruction in the global INST into the
   current frag, creating a relocation fixup when one is pending.
   NEW_INST, if non-NULL, is attached to the fixup so the instruction
   can be re-encoded when the fixup is resolved.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room for one 4-byte instruction.  */
  to = frag_more (INSN_SIZE);

  /* Mark this frag as containing AArch64 instructions.  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand and flags so md_apply_fix
	     can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
6118
/* Link together opcodes of the same name.  Several opcode table entries
   can share one mnemonic; the assembler walks this chain when matching
   the written operands against each candidate template.  */

struct templates
{
  const aarch64_opcode *opcode;	/* One candidate opcode entry.  */
  struct templates *next;	/* Next entry with the same mnemonic.  */
};

typedef struct templates templates;
6128
6129 static templates *
6130 lookup_mnemonic (const char *start, int len)
6131 {
6132 templates *templ = NULL;
6133
6134 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
6135 return templ;
6136 }
6137
6138 /* Subroutine of md_assemble, responsible for looking up the primary
6139 opcode from the mnemonic the user wrote. BASE points to the beginning
6140 of the mnemonic, DOT points to the first '.' within the mnemonic
6141 (if any) and END points to the end of the mnemonic. */
6142
6143 static templates *
6144 opcode_lookup (char *base, char *dot, char *end)
6145 {
6146 const aarch64_cond *cond;
6147 char condname[16];
6148 int len;
6149
6150 if (dot == end)
6151 return 0;
6152
6153 inst.cond = COND_ALWAYS;
6154
6155 /* Handle a possible condition. */
6156 if (dot)
6157 {
6158 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
6159 if (!cond)
6160 return 0;
6161 inst.cond = cond->value;
6162 len = dot - base;
6163 }
6164 else
6165 len = end - base;
6166
6167 if (inst.cond == COND_ALWAYS)
6168 {
6169 /* Look for unaffixed mnemonic. */
6170 return lookup_mnemonic (base, len);
6171 }
6172 else if (len <= 13)
6173 {
6174 /* append ".c" to mnemonic if conditional */
6175 memcpy (condname, base, len);
6176 memcpy (condname + len, ".c", 2);
6177 base = condname;
6178 len += 2;
6179 return lookup_mnemonic (base, len);
6180 }
6181
6182 return NULL;
6183 }
6184
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.

   The default value comes from the opcode entry; which field of *OPERAND
   it lands in depends on the operand's kind (register number, register
   lane, immediate, barrier option, ...).  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Register-lane operands: the default is also a register number.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate-like operands: the default is the immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      /* An omitted pattern defaults to "<pattern>, MUL #1".  */
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      /* No immediate was parsed, so make sure no relocation is left
	 pending for it.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* The default indexes into the barrier option table.  */
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      /* The default indexes into the hint option table.  */
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6283
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   Validates that the relocation written on a MOVZ/MOVN/MOVK is legal
   for that instruction and register width, and derives the implicit
   shift amount (0/16/32/48) from the relocation's "G" group, storing
   it in operand 1's shifter.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not use the signed or PREL relocation variants below.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Map the relocation's group number to the MOVW shift amount.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Groups 2 and 3 address bits above 31 and so are meaningless for
	 a 32-bit destination register.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6385
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The real relocation is selected by two coordinates: which pseudo
   reloc family was written (plain LO12, TLSLD DTPREL, TLSLE TPREL and
   their _NC variants) and the access size implied by the transfer
   register's qualifier (8/16/32/64/128 bits).  Returns
   BFD_RELOC_AARCH64_NONE when the qualifier is too wide for the chosen
   family.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: pseudo reloc family, in the same order as the pseudo codes in
     reloc.c.  Columns: log2 of the access size in bytes (0..4).  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* The address operand's qualifier may not have been resolved yet;
     derive it from the transfer register's qualifier if so.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_log2 (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS families have no 128-bit variant.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6473
6474 /* Check whether a register list REGINFO is valid. The registers have type
6475 REG_TYPE and must be numbered in increasing order (modulo the register
6476 bank size). They must have a consistent stride.
6477
6478 Return true if the list is valid, describing it in LIST if so. */
6479
6480 static bool
6481 reg_list_valid_p (uint32_t reginfo, struct aarch64_reglist *list,
6482 aarch64_reg_type reg_type)
6483 {
6484 uint32_t i, nb_regs, prev_regno, incr, mask;
6485 mask = reg_type_mask (reg_type);
6486
6487 nb_regs = 1 + (reginfo & 0x3);
6488 reginfo >>= 2;
6489 prev_regno = reginfo & 0x1f;
6490 incr = 1;
6491
6492 list->first_regno = prev_regno;
6493 list->num_regs = nb_regs;
6494
6495 for (i = 1; i < nb_regs; ++i)
6496 {
6497 uint32_t curr_regno, curr_incr;
6498 reginfo >>= 5;
6499 curr_regno = reginfo & 0x1f;
6500 curr_incr = (curr_regno - prev_regno) & mask;
6501 if (curr_incr == 0)
6502 return false;
6503 else if (i == 1)
6504 incr = curr_incr;
6505 else if (curr_incr != incr)
6506 return false;
6507 prev_regno = curr_regno;
6508 }
6509
6510 list->stride = incr;
6511 return true;
6512 }
6513
6514 /* Generic instruction operand parser. This does no encoding and no
6515 semantic validation; it merely squirrels values away in the inst
6516 structure. Returns TRUE or FALSE depending on whether the
6517 specified grammar matched. */
6518
6519 static bool
6520 parse_operands (char *str, const aarch64_opcode *opcode)
6521 {
6522 int i;
6523 char *backtrack_pos = 0;
6524 const enum aarch64_opnd *operands = opcode->operands;
6525 aarch64_reg_type imm_reg_type;
6526
6527 clear_error ();
6528 skip_whitespace (str);
6529
6530 if (AARCH64_CPU_HAS_ANY_FEATURES (*opcode->avariant,
6531 AARCH64_FEATURE_SVE
6532 | AARCH64_FEATURE_SVE2))
6533 imm_reg_type = REG_TYPE_R_ZR_SP_BHSDQ_VZP;
6534 else
6535 imm_reg_type = REG_TYPE_R_ZR_BHSDQ_V;
6536
6537 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6538 {
6539 int64_t val;
6540 const reg_entry *reg;
6541 int comma_skipped_p = 0;
6542 struct vector_type_el vectype;
6543 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6544 aarch64_opnd_info *info = &inst.base.operands[i];
6545 aarch64_reg_type reg_type;
6546
6547 DEBUG_TRACE ("parse operand %d", i);
6548
6549 /* Assign the operand code. */
6550 info->type = operands[i];
6551
6552 if (optional_operand_p (opcode, i))
6553 {
6554 /* Remember where we are in case we need to backtrack. */
6555 gas_assert (!backtrack_pos);
6556 backtrack_pos = str;
6557 }
6558
6559 /* Expect comma between operands; the backtrack mechanism will take
6560 care of cases of omitted optional operand. */
6561 if (i > 0 && ! skip_past_char (&str, ','))
6562 {
6563 set_syntax_error (_("comma expected between operands"));
6564 goto failure;
6565 }
6566 else
6567 comma_skipped_p = 1;
6568
6569 switch (operands[i])
6570 {
6571 case AARCH64_OPND_Rd:
6572 case AARCH64_OPND_Rn:
6573 case AARCH64_OPND_Rm:
6574 case AARCH64_OPND_Rt:
6575 case AARCH64_OPND_Rt2:
6576 case AARCH64_OPND_Rs:
6577 case AARCH64_OPND_Ra:
6578 case AARCH64_OPND_Rt_LS64:
6579 case AARCH64_OPND_Rt_SYS:
6580 case AARCH64_OPND_PAIRREG:
6581 case AARCH64_OPND_SVE_Rm:
6582 po_int_fp_reg_or_fail (REG_TYPE_R_ZR);
6583
6584 /* In LS64 load/store instructions Rt register number must be even
6585 and <=22. */
6586 if (operands[i] == AARCH64_OPND_Rt_LS64)
6587 {
6588 /* We've already checked if this is valid register.
6589 This will check if register number (Rt) is not undefined for LS64
6590 instructions:
6591 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6592 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6593 {
6594 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6595 goto failure;
6596 }
6597 }
6598 break;
6599
6600 case AARCH64_OPND_Rd_SP:
6601 case AARCH64_OPND_Rn_SP:
6602 case AARCH64_OPND_Rt_SP:
6603 case AARCH64_OPND_SVE_Rn_SP:
6604 case AARCH64_OPND_Rm_SP:
6605 po_int_fp_reg_or_fail (REG_TYPE_R_SP);
6606 break;
6607
6608 case AARCH64_OPND_Rm_EXT:
6609 case AARCH64_OPND_Rm_SFT:
6610 po_misc_or_fail (parse_shifter_operand
6611 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6612 ? SHIFTED_ARITH_IMM
6613 : SHIFTED_LOGIC_IMM)));
6614 if (!info->shifter.operator_present)
6615 {
6616 /* Default to LSL if not present. Libopcodes prefers shifter
6617 kind to be explicit. */
6618 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6619 info->shifter.kind = AARCH64_MOD_LSL;
6620 /* For Rm_EXT, libopcodes will carry out further check on whether
6621 or not stack pointer is used in the instruction (Recall that
6622 "the extend operator is not optional unless at least one of
6623 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6624 }
6625 break;
6626
6627 case AARCH64_OPND_Fd:
6628 case AARCH64_OPND_Fn:
6629 case AARCH64_OPND_Fm:
6630 case AARCH64_OPND_Fa:
6631 case AARCH64_OPND_Ft:
6632 case AARCH64_OPND_Ft2:
6633 case AARCH64_OPND_Sd:
6634 case AARCH64_OPND_Sn:
6635 case AARCH64_OPND_Sm:
6636 case AARCH64_OPND_SVE_VZn:
6637 case AARCH64_OPND_SVE_Vd:
6638 case AARCH64_OPND_SVE_Vm:
6639 case AARCH64_OPND_SVE_Vn:
6640 po_int_fp_reg_or_fail (REG_TYPE_BHSDQ);
6641 break;
6642
6643 case AARCH64_OPND_SVE_Pd:
6644 case AARCH64_OPND_SVE_Pg3:
6645 case AARCH64_OPND_SVE_Pg4_5:
6646 case AARCH64_OPND_SVE_Pg4_10:
6647 case AARCH64_OPND_SVE_Pg4_16:
6648 case AARCH64_OPND_SVE_Pm:
6649 case AARCH64_OPND_SVE_Pn:
6650 case AARCH64_OPND_SVE_Pt:
6651 case AARCH64_OPND_SME_Pm:
6652 reg_type = REG_TYPE_P;
6653 goto vector_reg;
6654
6655 case AARCH64_OPND_SVE_Za_5:
6656 case AARCH64_OPND_SVE_Za_16:
6657 case AARCH64_OPND_SVE_Zd:
6658 case AARCH64_OPND_SVE_Zm_5:
6659 case AARCH64_OPND_SVE_Zm_16:
6660 case AARCH64_OPND_SVE_Zn:
6661 case AARCH64_OPND_SVE_Zt:
6662 case AARCH64_OPND_SME_Zm:
6663 reg_type = REG_TYPE_Z;
6664 goto vector_reg;
6665
6666 case AARCH64_OPND_SVE_PNd:
6667 case AARCH64_OPND_SVE_PNg4_10:
6668 case AARCH64_OPND_SVE_PNn:
6669 case AARCH64_OPND_SVE_PNt:
6670 case AARCH64_OPND_SME_PNd3:
6671 case AARCH64_OPND_SME_PNg3:
6672 case AARCH64_OPND_SME_PNn:
6673 reg_type = REG_TYPE_PN;
6674 goto vector_reg;
6675
6676 case AARCH64_OPND_Va:
6677 case AARCH64_OPND_Vd:
6678 case AARCH64_OPND_Vn:
6679 case AARCH64_OPND_Vm:
6680 reg_type = REG_TYPE_V;
6681 vector_reg:
6682 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6683 if (!reg)
6684 goto failure;
6685 if (vectype.defined & NTA_HASINDEX)
6686 goto failure;
6687
6688 info->reg.regno = reg->number;
6689 if ((reg_type == REG_TYPE_P
6690 || reg_type == REG_TYPE_PN
6691 || reg_type == REG_TYPE_Z)
6692 && vectype.type == NT_invtype)
6693 /* Unqualified P and Z registers are allowed in certain
6694 contexts. Rely on F_STRICT qualifier checking to catch
6695 invalid uses. */
6696 info->qualifier = AARCH64_OPND_QLF_NIL;
6697 else
6698 {
6699 info->qualifier = vectype_to_qualifier (&vectype);
6700 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6701 goto failure;
6702 }
6703 break;
6704
6705 case AARCH64_OPND_VdD1:
6706 case AARCH64_OPND_VnD1:
6707 reg = aarch64_reg_parse (&str, REG_TYPE_V, &vectype);
6708 if (!reg)
6709 goto failure;
6710 if (vectype.type != NT_d || vectype.index != 1)
6711 {
6712 set_fatal_syntax_error
6713 (_("the top half of a 128-bit FP/SIMD register is expected"));
6714 goto failure;
6715 }
6716 info->reg.regno = reg->number;
6717 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6718 here; it is correct for the purpose of encoding/decoding since
6719 only the register number is explicitly encoded in the related
6720 instructions, although this appears a bit hacky. */
6721 info->qualifier = AARCH64_OPND_QLF_S_D;
6722 break;
6723
6724 case AARCH64_OPND_SVE_Zm3_INDEX:
6725 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6726 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6727 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6728 case AARCH64_OPND_SVE_Zm4_INDEX:
6729 case AARCH64_OPND_SVE_Zn_INDEX:
6730 case AARCH64_OPND_SME_Zm_INDEX1:
6731 case AARCH64_OPND_SME_Zm_INDEX2:
6732 case AARCH64_OPND_SME_Zn_INDEX1_16:
6733 case AARCH64_OPND_SME_Zn_INDEX2_15:
6734 case AARCH64_OPND_SME_Zn_INDEX2_16:
6735 case AARCH64_OPND_SME_Zn_INDEX3_14:
6736 case AARCH64_OPND_SME_Zn_INDEX3_15:
6737 case AARCH64_OPND_SME_Zn_INDEX4_14:
6738 reg_type = REG_TYPE_Z;
6739 goto vector_reg_index;
6740
6741 case AARCH64_OPND_Ed:
6742 case AARCH64_OPND_En:
6743 case AARCH64_OPND_Em:
6744 case AARCH64_OPND_Em16:
6745 case AARCH64_OPND_SM3_IMM2:
6746 reg_type = REG_TYPE_V;
6747 vector_reg_index:
6748 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6749 if (!reg)
6750 goto failure;
6751 if (!(vectype.defined & NTA_HASINDEX))
6752 goto failure;
6753
6754 if (reg->type == REG_TYPE_Z && vectype.type == NT_invtype)
6755 /* Unqualified Zn[index] is allowed in LUTI2 instructions. */
6756 info->qualifier = AARCH64_OPND_QLF_NIL;
6757 else
6758 {
6759 if (vectype.type == NT_invtype)
6760 goto failure;
6761 info->qualifier = vectype_to_qualifier (&vectype);
6762 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6763 goto failure;
6764 }
6765
6766 info->reglane.regno = reg->number;
6767 info->reglane.index = vectype.index;
6768 break;
6769
6770 case AARCH64_OPND_SVE_ZnxN:
6771 case AARCH64_OPND_SVE_ZtxN:
6772 case AARCH64_OPND_SME_Zdnx2:
6773 case AARCH64_OPND_SME_Zdnx4:
6774 case AARCH64_OPND_SME_Zmx2:
6775 case AARCH64_OPND_SME_Zmx4:
6776 case AARCH64_OPND_SME_Znx2:
6777 case AARCH64_OPND_SME_Znx4:
6778 case AARCH64_OPND_SME_Ztx2_STRIDED:
6779 case AARCH64_OPND_SME_Ztx4_STRIDED:
6780 reg_type = REG_TYPE_Z;
6781 goto vector_reg_list;
6782
6783 case AARCH64_OPND_SME_Pdx2:
6784 case AARCH64_OPND_SME_PdxN:
6785 reg_type = REG_TYPE_P;
6786 goto vector_reg_list;
6787
6788 case AARCH64_OPND_LVn:
6789 case AARCH64_OPND_LVt:
6790 case AARCH64_OPND_LVt_AL:
6791 case AARCH64_OPND_LEt:
6792 reg_type = REG_TYPE_V;
6793 vector_reg_list:
6794 if (reg_type == REG_TYPE_Z
6795 && get_opcode_dependent_value (opcode) == 1
6796 && *str != '{')
6797 {
6798 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6799 if (!reg)
6800 goto failure;
6801 info->reglist.first_regno = reg->number;
6802 info->reglist.num_regs = 1;
6803 info->reglist.stride = 1;
6804 }
6805 else
6806 {
6807 val = parse_vector_reg_list (&str, reg_type, &vectype);
6808 if (val == PARSE_FAIL)
6809 goto failure;
6810
6811 if (! reg_list_valid_p (val, &info->reglist, reg_type))
6812 {
6813 set_fatal_syntax_error (_("invalid register list"));
6814 goto failure;
6815 }
6816
6817 if ((int) vectype.width > 0 && *str != ',')
6818 {
6819 set_fatal_syntax_error
6820 (_("expected element type rather than vector type"));
6821 goto failure;
6822 }
6823 }
6824 if (operands[i] == AARCH64_OPND_LEt)
6825 {
6826 if (!(vectype.defined & NTA_HASINDEX))
6827 goto failure;
6828 info->reglist.has_index = 1;
6829 info->reglist.index = vectype.index;
6830 }
6831 else
6832 {
6833 if (vectype.defined & NTA_HASINDEX)
6834 goto failure;
6835 if (!(vectype.defined & NTA_HASTYPE))
6836 {
6837 if (reg_type == REG_TYPE_Z || reg_type == REG_TYPE_P)
6838 set_fatal_syntax_error (_("missing type suffix"));
6839 goto failure;
6840 }
6841 }
6842 info->qualifier = vectype_to_qualifier (&vectype);
6843 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6844 goto failure;
6845 break;
6846
6847 case AARCH64_OPND_CRn:
6848 case AARCH64_OPND_CRm:
6849 {
6850 char prefix = *(str++);
6851 if (prefix != 'c' && prefix != 'C')
6852 goto failure;
6853
6854 po_imm_nc_or_fail ();
6855 if (val > 15)
6856 {
6857 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6858 goto failure;
6859 }
6860 info->qualifier = AARCH64_OPND_QLF_CR;
6861 info->imm.value = val;
6862 break;
6863 }
6864
6865 case AARCH64_OPND_SHLL_IMM:
6866 case AARCH64_OPND_IMM_VLSR:
6867 po_imm_or_fail (1, 64);
6868 info->imm.value = val;
6869 break;
6870
6871 case AARCH64_OPND_CCMP_IMM:
6872 case AARCH64_OPND_SIMM5:
6873 case AARCH64_OPND_FBITS:
6874 case AARCH64_OPND_TME_UIMM16:
6875 case AARCH64_OPND_UIMM4:
6876 case AARCH64_OPND_UIMM4_ADDG:
6877 case AARCH64_OPND_UIMM10:
6878 case AARCH64_OPND_UIMM3_OP1:
6879 case AARCH64_OPND_UIMM3_OP2:
6880 case AARCH64_OPND_IMM_VLSL:
6881 case AARCH64_OPND_IMM:
6882 case AARCH64_OPND_IMM_2:
6883 case AARCH64_OPND_WIDTH:
6884 case AARCH64_OPND_SVE_INV_LIMM:
6885 case AARCH64_OPND_SVE_LIMM:
6886 case AARCH64_OPND_SVE_LIMM_MOV:
6887 case AARCH64_OPND_SVE_SHLIMM_PRED:
6888 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6889 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6890 case AARCH64_OPND_SVE_SHRIMM_PRED:
6891 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6892 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6893 case AARCH64_OPND_SVE_SIMM5:
6894 case AARCH64_OPND_SVE_SIMM5B:
6895 case AARCH64_OPND_SVE_SIMM6:
6896 case AARCH64_OPND_SVE_SIMM8:
6897 case AARCH64_OPND_SVE_UIMM3:
6898 case AARCH64_OPND_SVE_UIMM7:
6899 case AARCH64_OPND_SVE_UIMM8:
6900 case AARCH64_OPND_SVE_UIMM8_53:
6901 case AARCH64_OPND_IMM_ROT1:
6902 case AARCH64_OPND_IMM_ROT2:
6903 case AARCH64_OPND_IMM_ROT3:
6904 case AARCH64_OPND_SVE_IMM_ROT1:
6905 case AARCH64_OPND_SVE_IMM_ROT2:
6906 case AARCH64_OPND_SVE_IMM_ROT3:
6907 case AARCH64_OPND_CSSC_SIMM8:
6908 case AARCH64_OPND_CSSC_UIMM8:
6909 po_imm_nc_or_fail ();
6910 info->imm.value = val;
6911 break;
6912
6913 case AARCH64_OPND_SVE_AIMM:
6914 case AARCH64_OPND_SVE_ASIMM:
6915 po_imm_nc_or_fail ();
6916 info->imm.value = val;
6917 skip_whitespace (str);
6918 if (skip_past_comma (&str))
6919 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6920 else
6921 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6922 break;
6923
6924 case AARCH64_OPND_SVE_PATTERN:
6925 po_enum_or_fail (aarch64_sve_pattern_array);
6926 info->imm.value = val;
6927 break;
6928
6929 case AARCH64_OPND_SVE_PATTERN_SCALED:
6930 po_enum_or_fail (aarch64_sve_pattern_array);
6931 info->imm.value = val;
6932 if (skip_past_comma (&str)
6933 && !parse_shift (&str, info, SHIFTED_MUL))
6934 goto failure;
6935 if (!info->shifter.operator_present)
6936 {
6937 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6938 info->shifter.kind = AARCH64_MOD_MUL;
6939 info->shifter.amount = 1;
6940 }
6941 break;
6942
6943 case AARCH64_OPND_SVE_PRFOP:
6944 po_enum_or_fail (aarch64_sve_prfop_array);
6945 info->imm.value = val;
6946 break;
6947
6948 case AARCH64_OPND_UIMM7:
6949 po_imm_or_fail (0, 127);
6950 info->imm.value = val;
6951 break;
6952
6953 case AARCH64_OPND_IDX:
6954 case AARCH64_OPND_MASK:
6955 case AARCH64_OPND_BIT_NUM:
6956 case AARCH64_OPND_IMMR:
6957 case AARCH64_OPND_IMMS:
6958 po_imm_or_fail (0, 63);
6959 info->imm.value = val;
6960 break;
6961
6962 case AARCH64_OPND_IMM0:
6963 po_imm_nc_or_fail ();
6964 if (val != 0)
6965 {
6966 set_fatal_syntax_error (_("immediate zero expected"));
6967 goto failure;
6968 }
6969 info->imm.value = 0;
6970 break;
6971
6972 case AARCH64_OPND_FPIMM0:
6973 {
6974 int qfloat;
6975 bool res1 = false, res2 = false;
6976 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6977 it is probably not worth the effort to support it. */
6978 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6979 imm_reg_type))
6980 && (error_p ()
6981 || !(res2 = parse_constant_immediate (&str, &val,
6982 imm_reg_type))))
6983 goto failure;
6984 if ((res1 && qfloat == 0) || (res2 && val == 0))
6985 {
6986 info->imm.value = 0;
6987 info->imm.is_fp = 1;
6988 break;
6989 }
6990 set_fatal_syntax_error (_("immediate zero expected"));
6991 goto failure;
6992 }
6993
6994 case AARCH64_OPND_IMM_MOV:
6995 {
6996 char *saved = str;
6997 if (reg_name_p (str, REG_TYPE_R_ZR_SP)
6998 || reg_name_p (str, REG_TYPE_V))
6999 goto failure;
7000 str = saved;
7001 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
7002 GE_OPT_PREFIX, REJECT_ABSENT));
7003 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
7004 later. fix_mov_imm_insn will try to determine a machine
7005 instruction (MOVZ, MOVN or ORR) for it and will issue an error
7006 message if the immediate cannot be moved by a single
7007 instruction. */
7008 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7009 inst.base.operands[i].skip = 1;
7010 }
7011 break;
7012
7013 case AARCH64_OPND_SIMD_IMM:
7014 case AARCH64_OPND_SIMD_IMM_SFT:
7015 if (! parse_big_immediate (&str, &val, imm_reg_type))
7016 goto failure;
7017 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7018 /* addr_off_p */ 0,
7019 /* need_libopcodes_p */ 1,
7020 /* skip_p */ 1);
7021 /* Parse shift.
7022 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
7023 shift, we don't check it here; we leave the checking to
7024 the libopcodes (operand_general_constraint_met_p). By
7025 doing this, we achieve better diagnostics. */
7026 if (skip_past_comma (&str)
7027 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
7028 goto failure;
7029 if (!info->shifter.operator_present
7030 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
7031 {
7032 /* Default to LSL if not present. Libopcodes prefers shifter
7033 kind to be explicit. */
7034 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7035 info->shifter.kind = AARCH64_MOD_LSL;
7036 }
7037 break;
7038
7039 case AARCH64_OPND_FPIMM:
7040 case AARCH64_OPND_SIMD_FPIMM:
7041 case AARCH64_OPND_SVE_FPIMM8:
7042 {
7043 int qfloat;
7044 bool dp_p;
7045
7046 dp_p = double_precision_operand_p (&inst.base.operands[0]);
7047 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
7048 || !aarch64_imm_float_p (qfloat))
7049 {
7050 if (!error_p ())
7051 set_fatal_syntax_error (_("invalid floating-point"
7052 " constant"));
7053 goto failure;
7054 }
7055 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
7056 inst.base.operands[i].imm.is_fp = 1;
7057 }
7058 break;
7059
7060 case AARCH64_OPND_SVE_I1_HALF_ONE:
7061 case AARCH64_OPND_SVE_I1_HALF_TWO:
7062 case AARCH64_OPND_SVE_I1_ZERO_ONE:
7063 {
7064 int qfloat;
7065 bool dp_p;
7066
7067 dp_p = double_precision_operand_p (&inst.base.operands[0]);
7068 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
7069 {
7070 if (!error_p ())
7071 set_fatal_syntax_error (_("invalid floating-point"
7072 " constant"));
7073 goto failure;
7074 }
7075 inst.base.operands[i].imm.value = qfloat;
7076 inst.base.operands[i].imm.is_fp = 1;
7077 }
7078 break;
7079
7080 case AARCH64_OPND_LIMM:
7081 po_misc_or_fail (parse_shifter_operand (&str, info,
7082 SHIFTED_LOGIC_IMM));
7083 if (info->shifter.operator_present)
7084 {
7085 set_fatal_syntax_error
7086 (_("shift not allowed for bitmask immediate"));
7087 goto failure;
7088 }
7089 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7090 /* addr_off_p */ 0,
7091 /* need_libopcodes_p */ 1,
7092 /* skip_p */ 1);
7093 break;
7094
7095 case AARCH64_OPND_AIMM:
7096 if (opcode->op == OP_ADD)
7097 /* ADD may have relocation types. */
7098 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
7099 SHIFTED_ARITH_IMM));
7100 else
7101 po_misc_or_fail (parse_shifter_operand (&str, info,
7102 SHIFTED_ARITH_IMM));
7103 switch (inst.reloc.type)
7104 {
7105 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7106 info->shifter.amount = 12;
7107 break;
7108 case BFD_RELOC_UNUSED:
7109 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
7110 if (info->shifter.kind != AARCH64_MOD_NONE)
7111 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
7112 inst.reloc.pc_rel = 0;
7113 break;
7114 default:
7115 break;
7116 }
7117 info->imm.value = 0;
7118 if (!info->shifter.operator_present)
7119 {
7120 /* Default to LSL if not present. Libopcodes prefers shifter
7121 kind to be explicit. */
7122 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7123 info->shifter.kind = AARCH64_MOD_LSL;
7124 }
7125 break;
7126
7127 case AARCH64_OPND_HALF:
7128 {
7129 /* #<imm16> or relocation. */
7130 int internal_fixup_p;
7131 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
7132 if (internal_fixup_p)
7133 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
7134 skip_whitespace (str);
7135 if (skip_past_comma (&str))
7136 {
7137 /* {, LSL #<shift>} */
7138 if (! aarch64_gas_internal_fixup_p ())
7139 {
7140 set_fatal_syntax_error (_("can't mix relocation modifier "
7141 "with explicit shift"));
7142 goto failure;
7143 }
7144 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
7145 }
7146 else
7147 inst.base.operands[i].shifter.amount = 0;
7148 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
7149 inst.base.operands[i].imm.value = 0;
7150 if (! process_movw_reloc_info ())
7151 goto failure;
7152 }
7153 break;
7154
7155 case AARCH64_OPND_EXCEPTION:
7156 case AARCH64_OPND_UNDEFINED:
7157 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
7158 imm_reg_type));
7159 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7160 /* addr_off_p */ 0,
7161 /* need_libopcodes_p */ 0,
7162 /* skip_p */ 1);
7163 break;
7164
7165 case AARCH64_OPND_NZCV:
7166 {
7167 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
7168 if (nzcv != NULL)
7169 {
7170 str += 4;
7171 info->imm.value = nzcv->value;
7172 break;
7173 }
7174 po_imm_or_fail (0, 15);
7175 info->imm.value = val;
7176 }
7177 break;
7178
7179 case AARCH64_OPND_COND:
7180 case AARCH64_OPND_COND1:
7181 {
7182 char *start = str;
7183 do
7184 str++;
7185 while (ISALPHA (*str));
7186 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
7187 if (info->cond == NULL)
7188 {
7189 set_syntax_error (_("invalid condition"));
7190 goto failure;
7191 }
7192 else if (operands[i] == AARCH64_OPND_COND1
7193 && (info->cond->value & 0xe) == 0xe)
7194 {
7195 /* Do not allow AL or NV. */
7196 set_default_error ();
7197 goto failure;
7198 }
7199 }
7200 break;
7201
7202 case AARCH64_OPND_ADDR_ADRP:
7203 po_misc_or_fail (parse_adrp (&str));
7204 /* Clear the value as operand needs to be relocated. */
7205 info->imm.value = 0;
7206 break;
7207
7208 case AARCH64_OPND_ADDR_PCREL14:
7209 case AARCH64_OPND_ADDR_PCREL19:
7210 case AARCH64_OPND_ADDR_PCREL21:
7211 case AARCH64_OPND_ADDR_PCREL26:
7212 po_misc_or_fail (parse_address (&str, info));
7213 if (!info->addr.pcrel)
7214 {
7215 set_syntax_error (_("invalid pc-relative address"));
7216 goto failure;
7217 }
7218 if (inst.gen_lit_pool
7219 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
7220 {
7221 /* Only permit "=value" in the literal load instructions.
7222 The literal will be generated by programmer_friendly_fixup. */
7223 set_syntax_error (_("invalid use of \"=immediate\""));
7224 goto failure;
7225 }
7226 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
7227 {
7228 set_syntax_error (_("unrecognized relocation suffix"));
7229 goto failure;
7230 }
7231 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
7232 {
7233 info->imm.value = inst.reloc.exp.X_add_number;
7234 inst.reloc.type = BFD_RELOC_UNUSED;
7235 }
7236 else
7237 {
7238 info->imm.value = 0;
7239 if (inst.reloc.type == BFD_RELOC_UNUSED)
7240 switch (opcode->iclass)
7241 {
7242 case compbranch:
7243 case condbranch:
7244 /* e.g. CBZ or B.COND */
7245 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7246 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
7247 break;
7248 case testbranch:
7249 /* e.g. TBZ */
7250 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
7251 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
7252 break;
7253 case branch_imm:
7254 /* e.g. B or BL */
7255 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
7256 inst.reloc.type =
7257 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
7258 : BFD_RELOC_AARCH64_JUMP26;
7259 break;
7260 case loadlit:
7261 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7262 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
7263 break;
7264 case pcreladdr:
7265 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
7266 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
7267 break;
7268 default:
7269 gas_assert (0);
7270 abort ();
7271 }
7272 inst.reloc.pc_rel = 1;
7273 }
7274 break;
7275
7276 case AARCH64_OPND_ADDR_SIMPLE:
7277 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7278 {
7279 /* [<Xn|SP>{, #<simm>}] */
7280 char *start = str;
7281 /* First use the normal address-parsing routines, to get
7282 the usual syntax errors. */
7283 po_misc_or_fail (parse_address (&str, info));
7284 if (info->addr.pcrel || info->addr.offset.is_reg
7285 || !info->addr.preind || info->addr.postind
7286 || info->addr.writeback)
7287 {
7288 set_syntax_error (_("invalid addressing mode"));
7289 goto failure;
7290 }
7291
7292 /* Then retry, matching the specific syntax of these addresses. */
7293 str = start;
7294 po_char_or_fail ('[');
7295 po_reg_or_fail (REG_TYPE_R64_SP);
7296 /* Accept optional ", #0". */
7297 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7298 && skip_past_char (&str, ','))
7299 {
7300 skip_past_char (&str, '#');
7301 if (! skip_past_char (&str, '0'))
7302 {
7303 set_fatal_syntax_error
7304 (_("the optional immediate offset can only be 0"));
7305 goto failure;
7306 }
7307 }
7308 po_char_or_fail (']');
7309 break;
7310 }
7311
7312 case AARCH64_OPND_ADDR_REGOFF:
7313 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7314 po_misc_or_fail (parse_address (&str, info));
7315 regoff_addr:
7316 if (info->addr.pcrel || !info->addr.offset.is_reg
7317 || !info->addr.preind || info->addr.postind
7318 || info->addr.writeback)
7319 {
7320 set_syntax_error (_("invalid addressing mode"));
7321 goto failure;
7322 }
7323 if (!info->shifter.operator_present)
7324 {
7325 /* Default to LSL if not present. Libopcodes prefers shifter
7326 kind to be explicit. */
7327 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7328 info->shifter.kind = AARCH64_MOD_LSL;
7329 }
7330 /* Qualifier to be deduced by libopcodes. */
7331 break;
7332
7333 case AARCH64_OPND_ADDR_SIMM7:
7334 po_misc_or_fail (parse_address (&str, info));
7335 if (info->addr.pcrel || info->addr.offset.is_reg
7336 || (!info->addr.preind && !info->addr.postind))
7337 {
7338 set_syntax_error (_("invalid addressing mode"));
7339 goto failure;
7340 }
7341 if (inst.reloc.type != BFD_RELOC_UNUSED)
7342 {
7343 set_syntax_error (_("relocation not allowed"));
7344 goto failure;
7345 }
7346 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7347 /* addr_off_p */ 1,
7348 /* need_libopcodes_p */ 1,
7349 /* skip_p */ 0);
7350 break;
7351
7352 case AARCH64_OPND_ADDR_SIMM9:
7353 case AARCH64_OPND_ADDR_SIMM9_2:
7354 case AARCH64_OPND_ADDR_SIMM11:
7355 case AARCH64_OPND_ADDR_SIMM13:
7356 po_misc_or_fail (parse_address (&str, info));
7357 if (info->addr.pcrel || info->addr.offset.is_reg
7358 || (!info->addr.preind && !info->addr.postind)
7359 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7360 && info->addr.writeback))
7361 {
7362 set_syntax_error (_("invalid addressing mode"));
7363 goto failure;
7364 }
7365 if (inst.reloc.type != BFD_RELOC_UNUSED)
7366 {
7367 set_syntax_error (_("relocation not allowed"));
7368 goto failure;
7369 }
7370 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7371 /* addr_off_p */ 1,
7372 /* need_libopcodes_p */ 1,
7373 /* skip_p */ 0);
7374 break;
7375
7376 case AARCH64_OPND_ADDR_SIMM10:
7377 case AARCH64_OPND_ADDR_OFFSET:
7378 po_misc_or_fail (parse_address (&str, info));
7379 if (info->addr.pcrel || info->addr.offset.is_reg
7380 || !info->addr.preind || info->addr.postind)
7381 {
7382 set_syntax_error (_("invalid addressing mode"));
7383 goto failure;
7384 }
7385 if (inst.reloc.type != BFD_RELOC_UNUSED)
7386 {
7387 set_syntax_error (_("relocation not allowed"));
7388 goto failure;
7389 }
7390 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7391 /* addr_off_p */ 1,
7392 /* need_libopcodes_p */ 1,
7393 /* skip_p */ 0);
7394 break;
7395
7396 case AARCH64_OPND_ADDR_UIMM12:
7397 po_misc_or_fail (parse_address (&str, info));
7398 if (info->addr.pcrel || info->addr.offset.is_reg
7399 || !info->addr.preind || info->addr.writeback)
7400 {
7401 set_syntax_error (_("invalid addressing mode"));
7402 goto failure;
7403 }
7404 if (inst.reloc.type == BFD_RELOC_UNUSED)
7405 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7406 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7407 || (inst.reloc.type
7408 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7409 || (inst.reloc.type
7410 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7411 || (inst.reloc.type
7412 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7413 || (inst.reloc.type
7414 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7415 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7416 /* Leave qualifier to be determined by libopcodes. */
7417 break;
7418
7419 case AARCH64_OPND_SIMD_ADDR_POST:
7420 /* [<Xn|SP>], <Xm|#<amount>> */
7421 po_misc_or_fail (parse_address (&str, info));
7422 if (!info->addr.postind || !info->addr.writeback)
7423 {
7424 set_syntax_error (_("invalid addressing mode"));
7425 goto failure;
7426 }
7427 if (!info->addr.offset.is_reg)
7428 {
7429 if (inst.reloc.exp.X_op == O_constant)
7430 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7431 else
7432 {
7433 set_fatal_syntax_error
7434 (_("writeback value must be an immediate constant"));
7435 goto failure;
7436 }
7437 }
7438 /* No qualifier. */
7439 break;
7440
7441 case AARCH64_OPND_SME_SM_ZA:
7442 /* { SM | ZA } */
7443 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7444 {
7445 set_syntax_error (_("unknown or missing PSTATE field name"));
7446 goto failure;
7447 }
7448 info->reg.regno = val;
7449 break;
7450
7451 case AARCH64_OPND_SME_PnT_Wm_imm:
7452 if (!parse_dual_indexed_reg (&str, REG_TYPE_P,
7453 &info->indexed_za, &qualifier, 0))
7454 goto failure;
7455 info->qualifier = qualifier;
7456 break;
7457
7458 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7459 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7460 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7461 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7462 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7463 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7464 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7465 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7466 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7467 case AARCH64_OPND_SVE_ADDR_RI_U6:
7468 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7469 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7470 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7471 /* [X<n>{, #imm, MUL VL}]
7472 [X<n>{, #imm}]
7473 but recognizing SVE registers. */
7474 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7475 &offset_qualifier));
7476 if (base_qualifier != AARCH64_OPND_QLF_X)
7477 {
7478 set_syntax_error (_("invalid addressing mode"));
7479 goto failure;
7480 }
7481 sve_regimm:
7482 if (info->addr.pcrel || info->addr.offset.is_reg
7483 || !info->addr.preind || info->addr.writeback)
7484 {
7485 set_syntax_error (_("invalid addressing mode"));
7486 goto failure;
7487 }
7488 if (inst.reloc.type != BFD_RELOC_UNUSED
7489 || inst.reloc.exp.X_op != O_constant)
7490 {
7491 /* Make sure this has priority over
7492 "invalid addressing mode". */
7493 set_fatal_syntax_error (_("constant offset required"));
7494 goto failure;
7495 }
7496 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7497 break;
7498
7499 case AARCH64_OPND_SVE_ADDR_R:
7500 /* [<Xn|SP>{, <R><m>}]
7501 but recognizing SVE registers. */
7502 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7503 &offset_qualifier));
7504 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7505 {
7506 offset_qualifier = AARCH64_OPND_QLF_X;
7507 info->addr.offset.is_reg = 1;
7508 info->addr.offset.regno = 31;
7509 }
7510 else if (base_qualifier != AARCH64_OPND_QLF_X
7511 || offset_qualifier != AARCH64_OPND_QLF_X)
7512 {
7513 set_syntax_error (_("invalid addressing mode"));
7514 goto failure;
7515 }
7516 goto regoff_addr;
7517
7518 case AARCH64_OPND_SVE_ADDR_RR:
7519 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7520 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7521 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7522 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7523 case AARCH64_OPND_SVE_ADDR_RX:
7524 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7525 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7526 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7527 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7528 but recognizing SVE registers. */
7529 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7530 &offset_qualifier));
7531 if (base_qualifier != AARCH64_OPND_QLF_X
7532 || offset_qualifier != AARCH64_OPND_QLF_X)
7533 {
7534 set_syntax_error (_("invalid addressing mode"));
7535 goto failure;
7536 }
7537 goto regoff_addr;
7538
7539 case AARCH64_OPND_SVE_ADDR_RZ:
7540 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7541 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7542 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7543 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7544 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7545 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7546 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7547 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7548 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7549 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7550 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7551 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7552 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7553 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7554 &offset_qualifier));
7555 if (base_qualifier != AARCH64_OPND_QLF_X
7556 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7557 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7558 {
7559 set_syntax_error (_("invalid addressing mode"));
7560 goto failure;
7561 }
7562 info->qualifier = offset_qualifier;
7563 goto regoff_addr;
7564
7565 case AARCH64_OPND_SVE_ADDR_ZX:
7566 /* [Zn.<T>{, <Xm>}]. */
7567 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7568 &offset_qualifier));
7569 /* Things to check:
7570 base_qualifier either S_S or S_D
7571 offset_qualifier must be X
7572 */
7573 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7574 && base_qualifier != AARCH64_OPND_QLF_S_D)
7575 || offset_qualifier != AARCH64_OPND_QLF_X)
7576 {
7577 set_syntax_error (_("invalid addressing mode"));
7578 goto failure;
7579 }
7580 info->qualifier = base_qualifier;
7581 if (!info->addr.offset.is_reg || info->addr.pcrel
7582 || !info->addr.preind || info->addr.writeback
7583 || info->shifter.operator_present != 0)
7584 {
7585 set_syntax_error (_("invalid addressing mode"));
7586 goto failure;
7587 }
7588 info->shifter.kind = AARCH64_MOD_LSL;
7589 break;
7590
7591
7592 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7593 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7594 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7595 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7596 /* [Z<n>.<T>{, #imm}] */
7597 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7598 &offset_qualifier));
7599 if (base_qualifier != AARCH64_OPND_QLF_S_S
7600 && base_qualifier != AARCH64_OPND_QLF_S_D)
7601 {
7602 set_syntax_error (_("invalid addressing mode"));
7603 goto failure;
7604 }
7605 info->qualifier = base_qualifier;
7606 goto sve_regimm;
7607
7608 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7609 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7610 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7611 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7612 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7613
7614 We don't reject:
7615
7616 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7617
7618 here since we get better error messages by leaving it to
7619 the qualifier checking routines. */
7620 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7621 &offset_qualifier));
7622 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7623 && base_qualifier != AARCH64_OPND_QLF_S_D)
7624 || offset_qualifier != base_qualifier)
7625 {
7626 set_syntax_error (_("invalid addressing mode"));
7627 goto failure;
7628 }
7629 info->qualifier = base_qualifier;
7630 goto regoff_addr;
7631
7632 case AARCH64_OPND_SYSREG:
7633 {
7634 uint32_t sysreg_flags;
7635 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7636 &sysreg_flags)) == PARSE_FAIL)
7637 {
7638 set_syntax_error (_("unknown or missing system register name"));
7639 goto failure;
7640 }
7641 inst.base.operands[i].sysreg.value = val;
7642 inst.base.operands[i].sysreg.flags = sysreg_flags;
7643 break;
7644 }
7645
7646 case AARCH64_OPND_PSTATEFIELD:
7647 {
7648 uint32_t sysreg_flags;
7649 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7650 &sysreg_flags)) == PARSE_FAIL)
7651 {
7652 set_syntax_error (_("unknown or missing PSTATE field name"));
7653 goto failure;
7654 }
7655 inst.base.operands[i].pstatefield = val;
7656 inst.base.operands[i].sysreg.flags = sysreg_flags;
7657 break;
7658 }
7659
7660 case AARCH64_OPND_SYSREG_IC:
7661 inst.base.operands[i].sysins_op =
7662 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7663 goto sys_reg_ins;
7664
7665 case AARCH64_OPND_SYSREG_DC:
7666 inst.base.operands[i].sysins_op =
7667 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7668 goto sys_reg_ins;
7669
7670 case AARCH64_OPND_SYSREG_AT:
7671 inst.base.operands[i].sysins_op =
7672 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7673 goto sys_reg_ins;
7674
7675 case AARCH64_OPND_SYSREG_SR:
7676 inst.base.operands[i].sysins_op =
7677 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7678 goto sys_reg_ins;
7679
7680 case AARCH64_OPND_SYSREG_TLBI:
7681 inst.base.operands[i].sysins_op =
7682 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7683 sys_reg_ins:
7684 if (inst.base.operands[i].sysins_op == NULL)
7685 {
7686 set_fatal_syntax_error ( _("unknown or missing operation name"));
7687 goto failure;
7688 }
7689 break;
7690
7691 case AARCH64_OPND_BARRIER:
7692 case AARCH64_OPND_BARRIER_ISB:
7693 val = parse_barrier (&str);
7694 if (val != PARSE_FAIL
7695 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7696 {
7697 /* ISB only accepts options name 'sy'. */
7698 set_syntax_error
7699 (_("the specified option is not accepted in ISB"));
7700 /* Turn off backtrack as this optional operand is present. */
7701 backtrack_pos = 0;
7702 goto failure;
7703 }
7704 if (val != PARSE_FAIL
7705 && operands[i] == AARCH64_OPND_BARRIER)
7706 {
7707 /* Regular barriers accept options CRm (C0-C15).
7708 DSB nXS barrier variant accepts values > 15. */
7709 if (val < 0 || val > 15)
7710 {
7711 set_syntax_error (_("the specified option is not accepted in DSB"));
7712 goto failure;
7713 }
7714 }
7715 /* This is an extension to accept a 0..15 immediate. */
7716 if (val == PARSE_FAIL)
7717 po_imm_or_fail (0, 15);
7718 info->barrier = aarch64_barrier_options + val;
7719 break;
7720
7721 case AARCH64_OPND_BARRIER_DSB_NXS:
7722 val = parse_barrier (&str);
7723 if (val != PARSE_FAIL)
7724 {
7725 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7726 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7727 {
7728 set_syntax_error (_("the specified option is not accepted in DSB"));
7729 /* Turn off backtrack as this optional operand is present. */
7730 backtrack_pos = 0;
7731 goto failure;
7732 }
7733 }
7734 else
7735 {
7736 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7737 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7738 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7739 goto failure;
7740 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7741 {
7742 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7743 goto failure;
7744 }
7745 }
7746 /* Option index is encoded as 2-bit value in val<3:2>. */
7747 val = (val >> 2) - 4;
7748 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7749 break;
7750
7751 case AARCH64_OPND_PRFOP:
7752 val = parse_pldop (&str);
7753 /* This is an extension to accept a 0..31 immediate. */
7754 if (val == PARSE_FAIL)
7755 po_imm_or_fail (0, 31);
7756 inst.base.operands[i].prfop = aarch64_prfops + val;
7757 break;
7758
7759 case AARCH64_OPND_BARRIER_PSB:
7760 val = parse_barrier_psb (&str, &(info->hint_option));
7761 if (val == PARSE_FAIL)
7762 goto failure;
7763 break;
7764
7765 case AARCH64_OPND_SME_ZT0:
7766 po_reg_or_fail (REG_TYPE_ZT0);
7767 break;
7768
7769 case AARCH64_OPND_SME_ZT0_INDEX:
7770 reg = aarch64_reg_parse (&str, REG_TYPE_ZT0, &vectype);
7771 if (!reg || vectype.type != NT_invtype)
7772 goto failure;
7773 if (!(vectype.defined & NTA_HASINDEX))
7774 {
7775 set_syntax_error (_("missing register index"));
7776 goto failure;
7777 }
7778 info->imm.value = vectype.index;
7779 break;
7780
7781 case AARCH64_OPND_SME_ZT0_LIST:
7782 if (*str != '{')
7783 {
7784 set_expected_reglist_error (REG_TYPE_ZT0, parse_reg (&str));
7785 goto failure;
7786 }
7787 str++;
7788 if (!parse_typed_reg (&str, REG_TYPE_ZT0, &vectype, PTR_IN_REGLIST))
7789 goto failure;
7790 if (*str != '}')
7791 {
7792 set_syntax_error (_("expected '}' after ZT0"));
7793 goto failure;
7794 }
7795 str++;
7796 break;
7797
7798 case AARCH64_OPND_SME_PNn3_INDEX1:
7799 case AARCH64_OPND_SME_PNn3_INDEX2:
7800 reg = aarch64_reg_parse (&str, REG_TYPE_PN, &vectype);
7801 if (!reg)
7802 goto failure;
7803 if (!(vectype.defined & NTA_HASINDEX))
7804 {
7805 set_syntax_error (_("missing register index"));
7806 goto failure;
7807 }
7808 info->reglane.regno = reg->number;
7809 info->reglane.index = vectype.index;
7810 if (vectype.type == NT_invtype)
7811 info->qualifier = AARCH64_OPND_QLF_NIL;
7812 else
7813 info->qualifier = vectype_to_qualifier (&vectype);
7814 break;
7815
7816 case AARCH64_OPND_BTI_TARGET:
7817 val = parse_bti_operand (&str, &(info->hint_option));
7818 if (val == PARSE_FAIL)
7819 goto failure;
7820 break;
7821
7822 case AARCH64_OPND_SME_ZAda_2b:
7823 case AARCH64_OPND_SME_ZAda_3b:
7824 reg = parse_reg_with_qual (&str, REG_TYPE_ZAT, &qualifier, 0);
7825 if (!reg)
7826 goto failure;
7827 info->reg.regno = reg->number;
7828 info->qualifier = qualifier;
7829 break;
7830
7831 case AARCH64_OPND_SME_ZA_HV_idx_src:
7832 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
7833 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7834 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
7835 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7836 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr
7837 ? !parse_sme_za_hv_tiles_operand_with_braces (&str,
7838 &info->indexed_za,
7839 &qualifier)
7840 : !parse_dual_indexed_reg (&str, REG_TYPE_ZATHV,
7841 &info->indexed_za, &qualifier, 0))
7842 goto failure;
7843 info->qualifier = qualifier;
7844 break;
7845
7846 case AARCH64_OPND_SME_list_of_64bit_tiles:
7847 val = parse_sme_list_of_64bit_tiles (&str);
7848 if (val == PARSE_FAIL)
7849 goto failure;
7850 info->imm.value = val;
7851 break;
7852
7853 case AARCH64_OPND_SME_ZA_array_off3_0:
7854 case AARCH64_OPND_SME_ZA_array_off3_5:
7855 case AARCH64_OPND_SME_ZA_array_off4:
7856 if (!parse_dual_indexed_reg (&str, REG_TYPE_ZA,
7857 &info->indexed_za, &qualifier, 0))
7858 goto failure;
7859 info->qualifier = qualifier;
7860 break;
7861
7862 case AARCH64_OPND_SME_VLxN_10:
7863 case AARCH64_OPND_SME_VLxN_13:
7864 po_strict_enum_or_fail (aarch64_sme_vlxn_array);
7865 info->imm.value = val;
7866 break;
7867
7868 case AARCH64_OPND_MOPS_ADDR_Rd:
7869 case AARCH64_OPND_MOPS_ADDR_Rs:
7870 po_char_or_fail ('[');
7871 if (!parse_x0_to_x30 (&str, info))
7872 goto failure;
7873 po_char_or_fail (']');
7874 po_char_or_fail ('!');
7875 break;
7876
7877 case AARCH64_OPND_MOPS_WB_Rn:
7878 if (!parse_x0_to_x30 (&str, info))
7879 goto failure;
7880 po_char_or_fail ('!');
7881 break;
7882
7883 default:
7884 as_fatal (_("unhandled operand code %d"), operands[i]);
7885 }
7886
7887 /* If we get here, this operand was successfully parsed. */
7888 inst.base.operands[i].present = 1;
7889 continue;
7890
7891 failure:
7892 /* The parse routine should already have set the error, but in case
7893 not, set a default one here. */
7894 if (! error_p ())
7895 set_default_error ();
7896
7897 if (! backtrack_pos)
7898 goto parse_operands_return;
7899
7900 {
7901 /* We reach here because this operand is marked as optional, and
7902 either no operand was supplied or the operand was supplied but it
7903 was syntactically incorrect. In the latter case we report an
7904 error. In the former case we perform a few more checks before
7905 dropping through to the code to insert the default operand. */
7906
7907 char *tmp = backtrack_pos;
7908 char endchar = END_OF_INSN;
7909
7910 if (i != (aarch64_num_of_operands (opcode) - 1))
7911 endchar = ',';
7912 skip_past_char (&tmp, ',');
7913
7914 if (*tmp != endchar)
7915 /* The user has supplied an operand in the wrong format. */
7916 goto parse_operands_return;
7917
7918 /* Make sure there is not a comma before the optional operand.
7919 For example the fifth operand of 'sys' is optional:
7920
7921 sys #0,c0,c0,#0, <--- wrong
7922 sys #0,c0,c0,#0 <--- correct. */
7923 if (comma_skipped_p && i && endchar == END_OF_INSN)
7924 {
7925 set_fatal_syntax_error
7926 (_("unexpected comma before the omitted optional operand"));
7927 goto parse_operands_return;
7928 }
7929 }
7930
7931 /* Reaching here means we are dealing with an optional operand that is
7932 omitted from the assembly line. */
7933 gas_assert (optional_operand_p (opcode, i));
7934 info->present = 0;
7935 process_omitted_operand (operands[i], opcode, i, info);
7936
7937 /* Try again, skipping the optional operand at backtrack_pos. */
7938 str = backtrack_pos;
7939 backtrack_pos = 0;
7940
7941 /* Clear any error record after the omitted optional operand has been
7942 successfully handled. */
7943 clear_error ();
7944 }
7945
7946 /* Check if we have parsed all the operands. */
7947 if (*str != '\0' && ! error_p ())
7948 {
7949 /* Set I to the index of the last present operand; this is
7950 for the purpose of diagnostics. */
7951 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7952 ;
7953 set_fatal_syntax_error
7954 (_("unexpected characters following instruction"));
7955 }
7956
7957 parse_operands_return:
7958
7959 if (error_p ())
7960 {
7961 inst.parsing_error.index = i;
7962 DEBUG_TRACE ("parsing FAIL: %s - %s",
7963 operand_mismatch_kind_names[inst.parsing_error.kind],
7964 inst.parsing_error.error);
7965 /* Record the operand error properly; this is useful when there
7966 are multiple instruction templates for a mnemonic name, so that
7967 later on, we can select the error that most closely describes
7968 the problem. */
7969 record_operand_error_info (opcode, &inst.parsing_error);
7970 return false;
7971 }
7972 else
7973 {
7974 DEBUG_TRACE ("parsing SUCCESS");
7975 return true;
7976 }
7977 }
7978
7979 /* It does some fix-up to provide some programmer friendly feature while
7980 keeping the libopcodes happy, i.e. libopcodes only accepts
7981 the preferred architectural syntax.
7982 Return FALSE if there is any failure; otherwise return TRUE. */
7983
static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero. The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32. */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register restricts the testable bit number to 0..31.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  /* Rewrite to the X form, which is the syntax libopcodes
	     accepts (see the comment above this function).  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW reads a 32-bit literal regardless of the 64-bit Xt
	     destination, so force a 4-byte pool slot.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
8086
8087 /* Check for loads and stores that will cause unpredictable behavior. */
8088
static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  /* NOTE(review): in the checks below, opcode bit 22 appears to select
     load (vs. store) forms and bit 21 the pair/exclusive-pair forms --
     confirm against the opcode table before relying on this.  */
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 For pairs, either transfer register (operand 0 or 1) clashing with
	 the base (operand 2) triggers the warning.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	    || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
          if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      /* Operand 2 is Rt2 for the pair forms and Rn otherwise.  */
	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
8182
8183 static void
8184 force_automatic_sequence_close (void)
8185 {
8186 struct aarch64_segment_info_type *tc_seg_info;
8187
8188 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
8189 if (tc_seg_info->insn_sequence.instr)
8190 {
8191 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
8192 _("previous `%s' sequence has not been closed"),
8193 tc_seg_info->insn_sequence.instr->opcode->name);
8194 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
8195 }
8196 }
8197
8198 /* A wrapper function to interface with libopcodes on encoding and
8199 record the error message if there is any.
8200
8201 Return TRUE on success; otherwise return FALSE. */
8202
8203 static bool
8204 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
8205 aarch64_insn *code)
8206 {
8207 aarch64_operand_error error_info;
8208 memset (&error_info, '\0', sizeof (error_info));
8209 error_info.kind = AARCH64_OPDE_NIL;
8210 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
8211 && !error_info.non_fatal)
8212 return true;
8213
8214 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
8215 record_operand_error_info (opcode, &error_info);
8216 return error_info.non_fatal;
8217 }
8218
#ifdef DEBUG_AARCH64
/* Dump each operand of OPCODE, preferring its name and falling back to
   its description when the name is empty.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  for (int i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *opnd_name = aarch64_get_operand_name (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i,
		       opnd_name[0] != '\0'
		       ? opnd_name
		       : aarch64_get_operand_desc (opcode->operands[i]));
    }
}
#endif /* DEBUG_AARCH64 */
8234
8235 /* This is the guts of the machine-dependent assembler. STR points to a
8236 machine dependent instruction. This function is supposed to emit
8237 the frags/bytes it assembles to. */
8238
void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  DOT remembers the first '.' (a possible
     condition-code suffix separator, e.g. "b.eq").  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* A line with no '.' may be a ".req"-style register alias definition
     rather than an instruction.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* NOTE(review): inst.cond appears to be set during mnemonic lookup;
     preserve it across the instruction reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.
     The first template whose operands parse and encode successfully
     wins; otherwise the accumulated errors are reported at the end.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!aarch64_cpu_supports_inst_p (cpu_variant, inst_base))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; reset the instruction state before trying
	 the next candidate with the same mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8389
8390 /* Various frobbings of labels and their addresses. */
8391
void
aarch64_start_line_hook (void)
{
  /* Forget any label from the previous line; md_assemble only re-anchors
     a label seen on the current line.  */
  last_label_seen = NULL;
}
8397
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember the most recent label so md_assemble can re-anchor it to the
     (possibly re-aligned) instruction address.  */
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8405
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  An instruction sequence
     left open in the outgoing section is warned about and discarded.  */
  force_automatic_sequence_close ();
}
8412
8413 int
8414 aarch64_data_in_code (void)
8415 {
8416 if (startswith (input_line_pointer + 1, "data:"))
8417 {
8418 *input_line_pointer = '/';
8419 input_line_pointer += 5;
8420 *input_line_pointer = 0;
8421 return 1;
8422 }
8423
8424 return 0;
8425 }
8426
char *
aarch64_canonicalize_symbol_name (char *name)
{
  /* Strip a trailing "/data" marker from NAME in place and return NAME.
     Names of exactly "/data" (length 5) are left untouched; only a
     strictly longer name with that suffix is trimmed.
     NOTE(review): presumably pairs with the marker written by
     aarch64_data_in_code -- confirm.  */
  size_t len = strlen (name);

  /* Use size_t for the strlen result (the old `int len' mixed signed and
     unsigned arithmetic) and keep the assignment out of the condition.  */
  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8437 \f
8438 /* Table of all register names defined by default. The user can
8439 define additional names with .req. Note that all register names
8440 should appear in both upper and lowercase variants. Some registers
8441 also have mixed-case names. */
8442
/* REGDEF and REGDEF_ALIAS differ only in the trailing flag (true vs.
   false); presumably it marks the canonical spelling of the register --
   confirm against the reg_entry definition.  REGSET16 expands to names
   <p>0..<p>15, REGSET31 extends that to <p>30, and REGSET adds <p>31.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET16S(p,s,t) \
  REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
  REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
  REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
  REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  Note x31/w31 are deliberately absent; the
     register-31 encodings are entered below as SP/WSP and XZR/WZR.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, ZR_32), REGDEF (WZR, 31, ZR_32),
  REGDEF (xzr, 31, ZR_64), REGDEF (XZR, 31, ZR_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, V), REGSET (V, V),

  /* SVE vector registers.  */
  REGSET (z, Z), REGSET (Z, Z),

  /* SVE predicate(-as-mask) registers.  */
  REGSET16 (p, P), REGSET16 (P, P),

  /* SVE predicate-as-counter registers.  */
  REGSET16 (pn, PN), REGSET16 (PN, PN),

  /* SME ZA.  We model this as a register because it acts syntactically
     like ZA0H, supporting qualifier suffixes and indexing.  */
  REGDEF (za, 0, ZA), REGDEF (ZA, 0, ZA),

  /* SME ZA tile registers.  */
  REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),

  /* SME ZA tile registers (horizontal slice).  */
  REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),

  /* SME ZA tile registers (vertical slice).  */
  REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV),

  /* SME2 ZT0.  */
  REGDEF (zt0, 0, ZT0), REGDEF (ZT0, 0, ZT0)
};
8525
/* Undefine all of the table-building helpers defined above, including
   REGNUMS and REGSET16S, which were previously left defined and leaked
   into the remainder of the file.  */
#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGNUMS
#undef REGSET16
#undef REGSET16S
#undef REGSET31
#undef REGSET
8532
/* Temporary helpers for building the NZCV flag-name table: an upper-case
   letter stands for bit value 1 (flag set), lower-case for 0.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the four flag bits into a 4-bit immediate, N in bit 3 down to
   V in bit 0.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8570 \f
8571 /* MD interface: bits in the object file. */
8572
8573 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8574 for use in the a.out file, and stores them in the array pointed to by buf.
8575 This knows about the endian-ness of the target machine and does
8576 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8577 2 (short) and 4 (long) Floating numbers are put out as a series of
8578 LITTLENUMS (shorts, here at least). */
8579
8580 void
8581 md_number_to_chars (char *buf, valueT val, int n)
8582 {
8583 if (target_big_endian)
8584 number_to_chars_bigendian (buf, val, n);
8585 else
8586 number_to_chars_littleendian (buf, val, n);
8587 }
8588
8589 /* MD interface: Sections. */
8590
8591 /* Estimate the size of a frag before relaxing. Assume everything fits in
8592 4 bytes. */
8593
8594 int
8595 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
8596 {
8597 fragp->fr_var = 4;
8598 return 4;
8599 }
8600
/* Round up a section size to the appropriate boundary.  No extra padding
   is required for AArch64, so the size is returned unchanged.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  return size;
}
8608
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  /* Only code-alignment frags are handled here.  */
  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding to emit, and where in the frag to start writing it.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Zero-fill up to the next 4-byte instruction boundary; these bytes
     become part of the fixed (fr_fix) portion of the frag.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating (fr_var) part is a single NOP pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8666
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets: mapping symbols and per-frag
     state are maintained only in the ELF variant below.  */
}
8679
8680 #else /* OBJ_ELF is defined. */
8681 void
8682 aarch64_init_frag (fragS * fragP, int max_chars)
8683 {
8684 /* Record a mapping symbol for alignment frags. We will delete this
8685 later if the alignment ends up empty. */
8686 if (!fragP->tc_frag_data.recorded)
8687 fragP->tc_frag_data.recorded = 1;
8688
8689 /* PR 21809: Do not set a mapping state for debug sections
8690 - it just confuses other tools. */
8691 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
8692 return;
8693
8694 switch (fragP->fr_type)
8695 {
8696 case rs_align_test:
8697 case rs_fill:
8698 mapping_state_2 (MAP_DATA, max_chars);
8699 break;
8700 case rs_align:
8701 /* PR 20364: We can get alignment frags in code sections,
8702 so do not just assume that we should use the MAP_DATA state. */
8703 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
8704 break;
8705 case rs_align_code:
8706 mapping_state_2 (MAP_INSN, max_chars);
8707 break;
8708 default:
8709 break;
8710 }
8711 }
8712
8713 /* Whether SFrame stack trace info is supported. */
8714
8715 bool
8716 aarch64_support_sframe_p (void)
8717 {
8718 /* At this time, SFrame is supported for aarch64 only. */
8719 return (aarch64_abi == AARCH64_ABI_LP64);
8720 }
8721
/* Specify if RA tracking is needed.  AArch64 always tracks the return
   address register for SFrame.  */

bool
aarch64_sframe_ra_tracking_p (void)
{
  return true;
}
8729
/* Specify the fixed offset to recover RA from CFA.
   (useful only when RA tracking is not needed).  Since RA tracking is
   always enabled above, report the "invalid" marker here.  */

offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8738
8739 /* Get the abi/arch indentifier for SFrame. */
8740
8741 unsigned char
8742 aarch64_sframe_get_abi_arch (void)
8743 {
8744 unsigned char sframe_abi_arch = 0;
8745
8746 if (aarch64_support_sframe_p ())
8747 {
8748 sframe_abi_arch = target_big_endian
8749 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8750 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8751 }
8752
8753 return sframe_abi_arch;
8754 }
8755
8756 #endif /* OBJ_ELF */
8757 \f
/* Initialize the DWARF-2 unwind information for this procedure.
   The initial CFA is the stack pointer with a zero offset.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8765
8766 /* Convert REGNAME to a DWARF-2 register number. */
8767
8768 int
8769 tc_aarch64_regname_to_dw2regnum (char *regname)
8770 {
8771 const reg_entry *reg = parse_reg (&regname);
8772 if (reg == NULL)
8773 return -1;
8774
8775 switch (reg->type)
8776 {
8777 case REG_TYPE_SP_32:
8778 case REG_TYPE_SP_64:
8779 case REG_TYPE_R_32:
8780 case REG_TYPE_R_64:
8781 return reg->number;
8782
8783 case REG_TYPE_FP_B:
8784 case REG_TYPE_FP_H:
8785 case REG_TYPE_FP_S:
8786 case REG_TYPE_FP_D:
8787 case REG_TYPE_FP_Q:
8788 return reg->number + 64;
8789
8790 default:
8791 break;
8792 }
8793 return -1;
8794 }
8795
8796 /* Implement DWARF2_ADDR_SIZE. */
8797
8798 int
8799 aarch64_dwarf2_addr_size (void)
8800 {
8801 if (ilp32_p)
8802 return 4;
8803 else if (llp64_p)
8804 return 8;
8805 return bfd_arch_bits_per_address (stdoutput) / 8;
8806 }
8807
8808 /* MD interface: Symbol and relocation handling. */
8809
8810 /* Return the address within the segment that a PC-relative fixup is
8811 relative to. For AArch64 PC-relative fixups applied to instructions
8812 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8813
8814 long
8815 md_pcrel_from_section (fixS * fixP, segT seg)
8816 {
8817 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8818
8819 /* If this is pc-relative and we are going to emit a relocation
8820 then we just want to put out any pipeline compensation that the linker
8821 will need. Otherwise we want to use the calculated base. */
8822 if (fixP->fx_pcrel
8823 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8824 || aarch64_force_relocation (fixP)))
8825 base = 0;
8826
8827 /* AArch64 should be consistent for all pc-relative relocations. */
8828 return base + AARCH64_PCREL_OFFSET;
8829 }
8830
8831 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
8832 Otherwise we have no need to default values of symbols. */
8833
8834 symbolS *
8835 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
8836 {
8837 #ifdef OBJ_ELF
8838 if (name[0] == '_' && name[1] == 'G'
8839 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
8840 {
8841 if (!GOT_symbol)
8842 {
8843 if (symbol_find (name))
8844 as_bad (_("GOT already in the symbol table"));
8845
8846 GOT_symbol = symbol_new (name, undefined_section,
8847 &zero_address_frag, 0);
8848 }
8849
8850 return GOT_symbol;
8851 }
8852 #endif
8853
8854 return 0;
8855 }
8856
8857 /* Return non-zero if the indicated VALUE has overflowed the maximum
8858 range expressible by a unsigned number with the indicated number of
8859 BITS. */
8860
8861 static bool
8862 unsigned_overflow (valueT value, unsigned bits)
8863 {
8864 valueT lim;
8865 if (bits >= sizeof (valueT) * 8)
8866 return false;
8867 lim = (valueT) 1 << bits;
8868 return (value >= lim);
8869 }
8870
8871
8872 /* Return non-zero if the indicated VALUE has overflowed the maximum
8873 range expressible by an signed number with the indicated number of
8874 BITS. */
8875
8876 static bool
8877 signed_overflow (offsetT value, unsigned bits)
8878 {
8879 offsetT lim;
8880 if (bits >= sizeof (offsetT) * 8)
8881 return false;
8882 lim = (offsetT) 1 << (bits - 1);
8883 return (value < -lim || value >= lim);
8884 }
8885
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled (positive-offset) opcode to its unscaled twin;
     anything not listed cannot be converted.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  /* Switch the instruction over to the unscaled opcode before touching
     its operands.  */
  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the new opcode; failure means the offset does not fit
     the signed 9-bit form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8948
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  /* MOVZ/MOVN write to SP-encoded register 31 as ZR, so they are only
     usable when the destination is not SP.  */
  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias first (preferred for reversability).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* ORR-immediate treats register 31 as ZR, so it is only usable when the
     destination is not ZR.  */
  if (try_mov_bitmask_p)
    {
      /* Try the ORR (bitmask immediate) alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
9009
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate patched directly into the instruction word.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6  Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6  Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6  Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6  Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2   221111111111
		  1  098|7654|3 2   109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction with the
	 resolved value so the bitmask validity check is applied.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* Fallback: the scaled form failed but the offset fits the
	     unscaled (LDUR/STUR) form.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
9188
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* Target-private flags stashed by the parser (e.g. explicit-shift).  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* 19-bit word offset for LDR (literal); must be 4-byte aligned
	     and fit in a signed 21-bit byte offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVW group: SCALE selects which 16-bit slice of the value the
       relocation extracts (G0 = bits 0-15, G1 = 16-31, etc.).  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction.  */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Resolve the generic reloc to the ABI-specific (ILP32/LP64) one.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Resolve the generic reloc to the ABI-specific (ILP32/LP64) one.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    case BFD_RELOC_RVA:
    case BFD_RELOC_32_SECREL:
    case BFD_RELOC_16_SECIDX:
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually
     use this field, so the impact on the performance should be minimal.  */
  free (fixP->tc_fix_data.inst);

  return;
}
9581
9582 /* Translate internal representation of relocation info to BFD target
9583 format. */
9584
9585 arelent *
9586 tc_gen_reloc (asection * section, fixS * fixp)
9587 {
9588 arelent *reloc;
9589 bfd_reloc_code_real_type code;
9590
9591 reloc = XNEW (arelent);
9592
9593 reloc->sym_ptr_ptr = XNEW (asymbol *);
9594 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9595 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9596
9597 if (fixp->fx_pcrel)
9598 {
9599 if (section->use_rela_p)
9600 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9601 else
9602 fixp->fx_offset = reloc->address;
9603 }
9604 reloc->addend = fixp->fx_offset;
9605
9606 code = fixp->fx_r_type;
9607 switch (code)
9608 {
9609 case BFD_RELOC_16:
9610 if (fixp->fx_pcrel)
9611 code = BFD_RELOC_16_PCREL;
9612 break;
9613
9614 case BFD_RELOC_32:
9615 if (fixp->fx_pcrel)
9616 code = BFD_RELOC_32_PCREL;
9617 break;
9618
9619 case BFD_RELOC_64:
9620 if (fixp->fx_pcrel)
9621 code = BFD_RELOC_64_PCREL;
9622 break;
9623
9624 default:
9625 break;
9626 }
9627
9628 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9629 if (reloc->howto == NULL)
9630 {
9631 as_bad_where (fixp->fx_file, fixp->fx_line,
9632 _
9633 ("cannot represent %s relocation in this object file format"),
9634 bfd_get_reloc_code_name (code));
9635 return NULL;
9636 }
9637
9638 return reloc;
9639 }
9640
9641 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9642
9643 void
9644 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9645 {
9646 bfd_reloc_code_real_type type;
9647 int pcrel = 0;
9648
9649 #ifdef TE_PE
9650 if (exp->X_op == O_secrel)
9651 {
9652 exp->X_op = O_symbol;
9653 type = BFD_RELOC_32_SECREL;
9654 }
9655 else if (exp->X_op == O_secidx)
9656 {
9657 exp->X_op = O_symbol;
9658 type = BFD_RELOC_16_SECIDX;
9659 }
9660 else
9661 {
9662 #endif
9663 /* Pick a reloc.
9664 FIXME: @@ Should look at CPU word size. */
9665 switch (size)
9666 {
9667 case 1:
9668 type = BFD_RELOC_8;
9669 break;
9670 case 2:
9671 type = BFD_RELOC_16;
9672 break;
9673 case 4:
9674 type = BFD_RELOC_32;
9675 break;
9676 case 8:
9677 type = BFD_RELOC_64;
9678 break;
9679 default:
9680 as_bad (_("cannot do %u-byte relocation"), size);
9681 type = BFD_RELOC_UNUSED;
9682 break;
9683 }
9684 #ifdef TE_PE
9685 }
9686 #endif
9687
9688 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9689 }
9690
9691 /* Implement md_after_parse_args. This is the earliest time we need to decide
9692 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9693
9694 void
9695 aarch64_after_parse_args (void)
9696 {
9697 if (aarch64_abi != AARCH64_ABI_NONE)
9698 return;
9699
9700 #ifdef OBJ_ELF
9701 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9702 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9703 aarch64_abi = AARCH64_ABI_ILP32;
9704 else
9705 aarch64_abi = AARCH64_ABI_LP64;
9706 #else
9707 aarch64_abi = AARCH64_ABI_LLP64;
9708 #endif
9709 }
9710
9711 #ifdef OBJ_ELF
9712 const char *
9713 elf64_aarch64_target_format (void)
9714 {
9715 #ifdef TE_CLOUDABI
9716 /* FIXME: What to do for ilp32_p ? */
9717 if (target_big_endian)
9718 return "elf64-bigaarch64-cloudabi";
9719 else
9720 return "elf64-littleaarch64-cloudabi";
9721 #else
9722 if (target_big_endian)
9723 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9724 else
9725 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9726 #endif
9727 }
9728
/* Symbol write-out hook: simply defer to the generic ELF symbol
   frobbing.  *PUNTP is the generic hook's "discard this symbol"
   out-parameter.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9734 #elif defined OBJ_COFF
/* Return the BFD target name used for COFF/PE output; only
   little-endian PE output is supported.  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9740 #endif
9741
9742 /* MD interface: Finalization. */
9743
9744 /* A good place to do this, although this was probably not intended
9745 for this kind of use. We need to dump the literal pool before
9746 references are made to a null symbol pointer. */
9747
9748 void
9749 aarch64_cleanup (void)
9750 {
9751 literal_pool *pool;
9752
9753 for (pool = list_of_pools; pool; pool = pool->next)
9754 {
9755 /* Put it at the end of the relevant section. */
9756 subseg_set (pool->section, pool->sub_section);
9757 s_ltorg (0);
9758 }
9759 }
9760
9761 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections; ABFD and DUMMY are
   unused.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections gas did not write to.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag, looking at the last mapping symbol recorded for
     each one.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* The symbol sits exactly on the boundary with the next frag;
	 scan forward over empty frags to decide whether it is
	 redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9825 #endif
9826
/* Adjust the symbol table: drop redundant mapping symbols, then apply
   the generic ELF symbol-table fixups.  No-op for non-ELF targets.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9839
/* Insert KEY -> VALUE into TABLE.  The final 0 argument asks
   str_hash_insert not to replace an existing entry for KEY.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9845
/* Like checked_hash_insert, but first assert that the system-register
   name KEY fits within the fixed parsing buffer size.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9852
9853 static void
9854 fill_instruction_hash_table (void)
9855 {
9856 const aarch64_opcode *opcode = aarch64_opcode_table;
9857
9858 while (opcode->name != NULL)
9859 {
9860 templates *templ, *new_templ;
9861 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9862
9863 new_templ = XNEW (templates);
9864 new_templ->opcode = opcode;
9865 new_templ->next = NULL;
9866
9867 if (!templ)
9868 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9869 else
9870 {
9871 new_templ->next = templ->next;
9872 templ->next = new_templ;
9873 }
9874 ++opcode;
9875 }
9876 }
9877
9878 static inline void
9879 convert_to_upper (char *dst, const char *src, size_t num)
9880 {
9881 unsigned int i;
9882 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9883 *dst = TOUPPER (*src);
9884 *dst = '\0';
9885 }
9886
9887 /* Assume STR point to a lower-case string, allocate, convert and return
9888 the corresponding upper-case string. */
9889 static inline const char*
9890 get_upper_str (const char *str)
9891 {
9892 char *ret;
9893 size_t len = strlen (str);
9894 ret = XNEWVEC (char, len + 1);
9895 convert_to_upper (ret, str, len);
9896 return ret;
9897 }
9898
/* MD interface: Initialization.  */

/* Build every lookup table the parser needs (mnemonics, registers,
   system registers, condition codes, barrier/prefetch/hint operands),
   then settle the CPU variant and record the BFD machine type.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables used for operand and mnemonic lookup.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and PSTATE fields.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  /* IC / DC / AT / TLBI / SR system-instruction operands.  */
  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* General register names and NZCV flag-set operands.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend modifiers, hashed in both lower and upper case.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if(ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#ifdef OBJ_ELF
  /* FIXME - is there a better way to do it ?  */
  /* SFrame CFA tracking registers: SP, FP (x29) and RA (x30).  */
  aarch64_sframe_cfa_sp_reg = 31;
  aarch64_sframe_cfa_fp_reg = 29; /* x29.  */
  aarch64_sframe_cfa_ra_reg = 30;
#endif
}
10070
/* Command line processing.  */

/* Short options: only -m<...> is handled here.  */
const char *md_shortopts = "m:";

/* OPTION_EB / OPTION_EL are only defined for the endiannesses the
   target was configured to support; md_longopts below keys off which
   macros exist.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
10097
/* A simple flag-style option: matching OPTION sets *VAR to VALUE.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Flag-style command-line options recognized by this back end.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
10120
/* An entry mapping a -mcpu= name to its baseline architecture plus the
   extra features that CPU implements.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
10289
/* An entry mapping a -march= name to its feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
10316
/* ISA extensions.  */
/* An entry mapping a "+ext" name to the feature bits it enables and
   the features it requires (used to compute transitive closures when
   enabling or disabling extensions).  Table order matters: extension
   names are prefix-matched in order by aarch64_parse_features.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require;  /* Feature dependencies.  */
};

static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-f64f64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i16i64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme2",		AARCH64_FEATURE (AARCH64_FEATURE_SME2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {"mops",		AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
			AARCH64_ARCH_NONE},
  {"hbc",		AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
			AARCH64_ARCH_NONE},
  {"cssc",		AARCH64_FEATURE (AARCH64_FEATURE_CSSC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
10427
/* A long option that takes a sub-argument, decoded by FUNC
   (e.g. -mcpu=<name>+<ext>...).  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
10435
10436 /* Transitive closure of features depending on set. */
10437 static aarch64_feature_set
10438 aarch64_feature_disable_set (aarch64_feature_set set)
10439 {
10440 const struct aarch64_option_cpu_value_table *opt;
10441 aarch64_feature_set prev = 0;
10442
10443 while (prev != set) {
10444 prev = set;
10445 for (opt = aarch64_features; opt->name != NULL; opt++)
10446 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10447 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10448 }
10449 return set;
10450 }
10451
10452 /* Transitive closure of dependencies of set. */
10453 static aarch64_feature_set
10454 aarch64_feature_enable_set (aarch64_feature_set set)
10455 {
10456 const struct aarch64_option_cpu_value_table *opt;
10457 aarch64_feature_set prev = 0;
10458
10459 while (prev != set) {
10460 prev = set;
10461 for (opt = aarch64_features; opt->name != NULL; opt++)
10462 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10463 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10464 }
10465 return set;
10466 }
10467
10468 static int
10469 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10470 bool ext_only)
10471 {
10472 /* We insist on extensions being added before being removed. We achieve
10473 this by using the ADDING_VALUE variable to indicate whether we are
10474 adding an extension (1) or removing it (0) and only allowing it to
10475 change in the order -1 -> 1 -> 0. */
10476 int adding_value = -1;
10477 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10478
10479 /* Copy the feature set, so that we can modify it. */
10480 *ext_set = **opt_p;
10481 *opt_p = ext_set;
10482
10483 while (str != NULL && *str != 0)
10484 {
10485 const struct aarch64_option_cpu_value_table *opt;
10486 const char *ext = NULL;
10487 int optlen;
10488
10489 if (!ext_only)
10490 {
10491 if (*str != '+')
10492 {
10493 as_bad (_("invalid architectural extension"));
10494 return 0;
10495 }
10496
10497 ext = strchr (++str, '+');
10498 }
10499
10500 if (ext != NULL)
10501 optlen = ext - str;
10502 else
10503 optlen = strlen (str);
10504
10505 if (optlen >= 2 && startswith (str, "no"))
10506 {
10507 if (adding_value != 0)
10508 adding_value = 0;
10509 optlen -= 2;
10510 str += 2;
10511 }
10512 else if (optlen > 0)
10513 {
10514 if (adding_value == -1)
10515 adding_value = 1;
10516 else if (adding_value != 1)
10517 {
10518 as_bad (_("must specify extensions to add before specifying "
10519 "those to remove"));
10520 return false;
10521 }
10522 }
10523
10524 if (optlen == 0)
10525 {
10526 as_bad (_("missing architectural extension"));
10527 return 0;
10528 }
10529
10530 gas_assert (adding_value != -1);
10531
10532 for (opt = aarch64_features; opt->name != NULL; opt++)
10533 if (strncmp (opt->name, str, optlen) == 0)
10534 {
10535 aarch64_feature_set set;
10536
10537 /* Add or remove the extension. */
10538 if (adding_value)
10539 {
10540 set = aarch64_feature_enable_set (opt->value);
10541 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10542 }
10543 else
10544 {
10545 set = aarch64_feature_disable_set (opt->value);
10546 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10547 }
10548 break;
10549 }
10550
10551 if (opt->name == NULL)
10552 {
10553 as_bad (_("unknown architectural extension `%s'"), str);
10554 return 0;
10555 }
10556
10557 str = ext;
10558 };
10559
10560 return 1;
10561 }
10562
10563 static int
10564 aarch64_parse_cpu (const char *str)
10565 {
10566 const struct aarch64_cpu_option_table *opt;
10567 const char *ext = strchr (str, '+');
10568 size_t optlen;
10569
10570 if (ext != NULL)
10571 optlen = ext - str;
10572 else
10573 optlen = strlen (str);
10574
10575 if (optlen == 0)
10576 {
10577 as_bad (_("missing cpu name `%s'"), str);
10578 return 0;
10579 }
10580
10581 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10582 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10583 {
10584 mcpu_cpu_opt = &opt->value;
10585 if (ext != NULL)
10586 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10587
10588 return 1;
10589 }
10590
10591 as_bad (_("unknown cpu `%s'"), str);
10592 return 0;
10593 }
10594
10595 static int
10596 aarch64_parse_arch (const char *str)
10597 {
10598 const struct aarch64_arch_option_table *opt;
10599 const char *ext = strchr (str, '+');
10600 size_t optlen;
10601
10602 if (ext != NULL)
10603 optlen = ext - str;
10604 else
10605 optlen = strlen (str);
10606
10607 if (optlen == 0)
10608 {
10609 as_bad (_("missing architecture name `%s'"), str);
10610 return 0;
10611 }
10612
10613 for (opt = aarch64_archs; opt->name != NULL; opt++)
10614 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10615 {
10616 march_cpu_opt = &opt->value;
10617 if (ext != NULL)
10618 return aarch64_parse_features (ext, &march_cpu_opt, false);
10619
10620 return 1;
10621 }
10622
10623 as_bad (_("unknown architecture `%s'\n"), str);
10624 return 0;
10625 }
10626
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name as given to -mabi=.  */
  enum aarch64_abi_type value;	/* Corresponding internal ABI code.  */
};

/* ABIs selectable with -mabi=, keyed on object format.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  /* Non-ELF targets (presumably COFF/PE -- confirm) only offer LLP64.  */
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10642
10643 static int
10644 aarch64_parse_abi (const char *str)
10645 {
10646 unsigned int i;
10647
10648 if (str[0] == '\0')
10649 {
10650 as_bad (_("missing abi name `%s'"), str);
10651 return 0;
10652 }
10653
10654 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10655 if (strcmp (str, aarch64_abis[i].name) == 0)
10656 {
10657 aarch64_abi = aarch64_abis[i].value;
10658 return 1;
10659 }
10660
10661 as_bad (_("unknown abi `%s'\n"), str);
10662 return 0;
10663 }
10664
/* Long-form options (-mabi=, -mcpu=, -march=) and the sub-option
   parsers that handle their arguments.  md_parse_option matches on the
   option-name prefix and hands the rest of the argument to FUNC.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10674
/* Target hook: process one command-line option.  C is the option
   character and ARG its argument (or NULL).  Return 1 if the option was
   consumed, 0 if it is not ours.  */

int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the short options; they match when ARG equals the
	 remainder of the option string (or both are empty).  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the long options, matched on their name prefix.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text after the
		 option name (e.g. the "<cpu>" part of -mcpu=<cpu>).  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10741
/* Target hook: print the AArch64-specific options for --help to FP,
   walking both the short- and long-option tables.  */

void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  /* Entries with a NULL help string are deliberately undocumented.  */
  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
 -EB assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
 -EL assemble code for a little-endian cpu\n"));
#endif
}
10768
10769 /* Parse a .cpu directive. */
10770
10771 static void
10772 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10773 {
10774 const struct aarch64_cpu_option_table *opt;
10775 char saved_char;
10776 char *name;
10777 char *ext;
10778 size_t optlen;
10779
10780 name = input_line_pointer;
10781 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10782 saved_char = *input_line_pointer;
10783 *input_line_pointer = 0;
10784
10785 ext = strchr (name, '+');
10786
10787 if (ext != NULL)
10788 optlen = ext - name;
10789 else
10790 optlen = strlen (name);
10791
10792 /* Skip the first "all" entry. */
10793 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10794 if (strlen (opt->name) == optlen
10795 && strncmp (name, opt->name, optlen) == 0)
10796 {
10797 mcpu_cpu_opt = &opt->value;
10798 if (ext != NULL)
10799 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10800 return;
10801
10802 cpu_variant = *mcpu_cpu_opt;
10803
10804 *input_line_pointer = saved_char;
10805 demand_empty_rest_of_line ();
10806 return;
10807 }
10808 as_bad (_("unknown cpu `%s'"), name);
10809 *input_line_pointer = saved_char;
10810 ignore_rest_of_line ();
10811 }
10812
10813
10814 /* Parse a .arch directive. */
10815
10816 static void
10817 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10818 {
10819 const struct aarch64_arch_option_table *opt;
10820 char saved_char;
10821 char *name;
10822 char *ext;
10823 size_t optlen;
10824
10825 name = input_line_pointer;
10826 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10827 saved_char = *input_line_pointer;
10828 *input_line_pointer = 0;
10829
10830 ext = strchr (name, '+');
10831
10832 if (ext != NULL)
10833 optlen = ext - name;
10834 else
10835 optlen = strlen (name);
10836
10837 /* Skip the first "all" entry. */
10838 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10839 if (strlen (opt->name) == optlen
10840 && strncmp (name, opt->name, optlen) == 0)
10841 {
10842 mcpu_cpu_opt = &opt->value;
10843 if (ext != NULL)
10844 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10845 return;
10846
10847 cpu_variant = *mcpu_cpu_opt;
10848
10849 *input_line_pointer = saved_char;
10850 demand_empty_rest_of_line ();
10851 return;
10852 }
10853
10854 as_bad (_("unknown architecture `%s'\n"), name);
10855 *input_line_pointer = saved_char;
10856 ignore_rest_of_line ();
10857 }
10858
10859 /* Parse a .arch_extension directive. */
10860
10861 static void
10862 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10863 {
10864 char saved_char;
10865 char *ext = input_line_pointer;
10866
10867 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10868 saved_char = *input_line_pointer;
10869 *input_line_pointer = 0;
10870
10871 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10872 return;
10873
10874 cpu_variant = *mcpu_cpu_opt;
10875
10876 *input_line_pointer = saved_char;
10877 demand_empty_rest_of_line ();
10878 }
10879
/* Copy symbol information.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  /* Propagate the AArch64-specific per-symbol flag word from SRC to
     DEST (AARCH64_GET_FLAG is an lvalue accessor).  */
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10887
10888 #ifdef OBJ_ELF
10889 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10890 This is needed so AArch64 specific st_other values can be independently
10891 specified for an IFUNC resolver (that is called by the dynamic linker)
10892 and the symbol it resolves (aliased to the resolver). In particular,
10893 if a function symbol has special st_other value set via directives,
10894 then attaching an IFUNC resolver to that symbol should not override
10895 the st_other setting. Requiring the directive on the IFUNC resolver
10896 symbol would be unexpected and problematic in C code, where the two
10897 symbols appear as two independent function declarations. */
10898
10899 void
10900 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10901 {
10902 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10903 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10904 /* If size is unset, copy size from src. Because we don't track whether
10905 .size has been used, we can't differentiate .size dest, 0 from the case
10906 where dest's size is unset. */
10907 if (!destelf->size && S_GET_SIZE (dest) == 0)
10908 {
10909 if (srcelf->size)
10910 {
10911 destelf->size = XNEW (expressionS);
10912 *destelf->size = *srcelf->size;
10913 }
10914 S_SET_SIZE (dest, S_GET_SIZE (src));
10915 }
10916 }
10917 #endif