]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
aarch64: Add +rcpc2 flag for existing instructions
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2024 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 #define streq(a, b) (strcmp (a, b) == 0)
42
43 #define END_OF_INSN '\0'
44
45 static aarch64_feature_set cpu_variant;
46
47 /* Variables that we set while parsing command-line options. Once all
48 options have been read we re-process these values to set the real
49 assembly flags. */
50 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
51 static const aarch64_feature_set *march_cpu_opt = NULL;
52
53 /* Constants for known architecture features. */
54 static const aarch64_feature_set cpu_default = AARCH64_ARCH_FEATURES (V8A);
55
56 /* Currently active instruction sequence. */
57 static aarch64_instr_sequence *insn_sequence = NULL;
58
59 #ifdef OBJ_ELF
60 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
61 static symbolS *GOT_symbol;
62 #endif
63
/* Which ABI to use.  */
enum aarch64_abi_type
{
  AARCH64_ABI_NONE = 0,
  /* 64-bit int, long and pointers.  */
  AARCH64_ABI_LP64 = 1,
  /* 32-bit int, long and pointers.  */
  AARCH64_ABI_ILP32 = 2,
  /* 32-bit int and long, but 64-bit pointers.  */
  AARCH64_ABI_LLP64 = 3
};
72
73 unsigned int aarch64_sframe_cfa_sp_reg;
74 /* The other CFA base register for SFrame stack trace info. */
75 unsigned int aarch64_sframe_cfa_fp_reg;
76 unsigned int aarch64_sframe_cfa_ra_reg;
77
78 #ifndef DEFAULT_ARCH
79 #define DEFAULT_ARCH "aarch64"
80 #endif
81
82 #ifdef OBJ_ELF
83 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
84 static const char *default_arch = DEFAULT_ARCH;
85 #endif
86
87 /* AArch64 ABI for the output file. */
88 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
89
90 /* When non-zero, program to a 32-bit model, in which the C data types
91 int, long and all pointer types are 32-bit objects (ILP32); or to a
92 64-bit model, in which the C int type is 32-bits but the C long type
93 and all pointer types are 64-bit objects (LP64). */
94 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
95
96 /* When non zero, C types int and long are 32 bit,
97 pointers, however are 64 bit */
98 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
99
/* Element type parsed from a vector arrangement or predicate suffix.  */
enum vector_el_type
{
  NT_invtype = -1,	/* No valid element type.  */
  NT_b,			/* Byte.  */
  NT_h,			/* Half-word.  */
  NT_s,			/* Single word.  */
  NT_d,			/* Double word.  */
  NT_q,			/* Quad word.  */
  NT_zero,		/* Predicate "/z" (zeroing) suffix.  */
  NT_merge		/* Predicate "/m" (merging) suffix.  */
};
111
112 /* Bits for DEFINED field in vector_type_el. */
113 #define NTA_HASTYPE 1
114 #define NTA_HASINDEX 2
115 #define NTA_HASVARWIDTH 4
116
/* Parsed representation of a vector arrangement suffix (e.g. ".4s")
   or vector element reference; DEFINED says which fields are valid
   (see the NTA_* bits above).  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type (NT_*).  */
  unsigned char defined;	/* Mask of NTA_* bits.  */
  unsigned element_size;	/* Element size, if NTA_HASVARWIDTH.  */
  unsigned width;		/* Number of elements.  */
  int64_t index;		/* Element index, if NTA_HASINDEX.  */
};
125
126 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
127
/* Relocation/fixup information gathered while parsing one instruction.  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code.  */
  expressionS exp;			/* Expression being relocated.  */
  int pc_rel;				/* PC-relative?  */
  enum aarch64_opnd opnd;		/* Operand the reloc applies to.  */
  uint32_t flags;			/* FIXUP_F_* flags.  */
  /* Non-zero if libopcodes is needed to complete the encoding.  */
  unsigned need_libopcodes_p : 1;
};
137
/* Everything GAS tracks about the assembly line currently being
   parsed and encoded.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  aarch64_operand_error parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};
151
152 typedef struct aarch64_instruction aarch64_instruction;
153
154 static aarch64_instruction inst;
155
156 static bool parse_operands (char *, const aarch64_opcode *);
157 static bool programmer_friendly_fixup (aarch64_instruction *);
158
159 /* If an AARCH64_OPDE_SYNTAX_ERROR has no error string, its first three
160 data fields contain the following information:
161
162 data[0].i:
163 A mask of register types that would have been acceptable as bare
164 operands, outside of a register list. In addition, SEF_DEFAULT_ERROR
165 is set if a general parsing error occurred for an operand (that is,
166 an error not related to registers, and having no error string).
167
168 data[1].i:
169 A mask of register types that would have been acceptable inside
170 a register list. In addition, SEF_IN_REGLIST is set if the
171 operand contained a '{' and if we got to the point of trying
172 to parse a register inside a list.
173
174 data[2].i:
175 The mask associated with the register that was actually seen, or 0
176 if none. A nonzero value describes a register inside a register
177 list if data[1].i & SEF_IN_REGLIST, otherwise it describes a bare
178 register.
179
180 The idea is that stringless errors from multiple opcode templates can
181 be ORed together to give a summary of the available alternatives. */
182 #define SEF_DEFAULT_ERROR (1U << 31)
183 #define SEF_IN_REGLIST (1U << 31)
184
185 /* Diagnostics inline function utilities.
186
187 These are lightweight utilities which should only be called by parse_operands
188 and other parsers. GAS processes each assembly line by parsing it against
189 instruction template(s), in the case of multiple templates (for the same
190 mnemonic name), those templates are tried one by one until one succeeds or
191 all fail. An assembly line may fail a few templates before being
192 successfully parsed; an error saved here in most cases is not a user error
193 but an error indicating the current template is not the right template.
194 Therefore it is very important that errors can be saved at a low cost during
195 the parsing; we don't want to slow down the whole parsing by recording
196 non-user errors in detail.
197
198 Remember that the objective is to help GAS pick up the most appropriate
199 error message in the case of multiple templates, e.g. FMOV which has 8
200 templates. */
201
/* Discard any diagnostic recorded for the current instruction so the
   next opcode template can be tried with a clean slate.  */
static inline void
clear_error (void)
{
  memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
  inst.parsing_error.kind = AARCH64_OPDE_NIL;
}
208
/* Return TRUE if a diagnostic is currently recorded for the
   instruction being parsed.  */
static inline bool
error_p (void)
{
  return inst.parsing_error.kind != AARCH64_OPDE_NIL;
}
214
/* Record a diagnostic of kind KIND with message ERROR (possibly NULL),
   discarding any earlier one.  The operand index is reset to -1,
   i.e. not specific to any operand.  */
static inline void
set_error (enum aarch64_operand_error_kind kind, const char *error)
{
  memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
  inst.parsing_error.index = -1;
  inst.parsing_error.kind = kind;
  inst.parsing_error.error = error;
}
223
/* Record ERROR as an AARCH64_OPDE_RECOVERABLE diagnostic.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
229
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message.  The NULL error string plus the SEF_DEFAULT_ERROR
   marker in data[0] identify this as a generic parsing failure (see the
   comment above SEF_DEFAULT_ERROR).  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
  inst.parsing_error.data[0].i = SEF_DEFAULT_ERROR;
}
238
/* Record ERROR as a syntax error, replacing any earlier diagnostic.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
244
/* Record ERROR as a syntax error, but only if no diagnostic has been
   recorded yet; the earliest error is usually the most meaningful.  */
static inline void
set_first_syntax_error (const char *error)
{
  if (! error_p ())
    set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
251
/* Record ERROR as an AARCH64_OPDE_FATAL_SYNTAX_ERROR diagnostic,
   replacing any earlier one.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
257 \f
258 /* Return value for certain parsers when the parsing fails; those parsers
259 return the information of the parsed result, e.g. register number, on
260 success. */
261 #define PARSE_FAIL -1
262
263 /* This is an invalid condition code that means no conditional field is
264 present. */
265 #define COND_ALWAYS 0x10
266
267 typedef struct
268 {
269 const char *template;
270 uint32_t value;
271 } asm_nzcv;
272
273 struct reloc_entry
274 {
275 char *name;
276 bfd_reloc_code_real_type reloc;
277 };
278
279 /* Macros to define the register types and masks for the purpose
280 of parsing. */
281
282 #undef AARCH64_REG_TYPES
283 #define AARCH64_REG_TYPES \
284 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
285 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
286 BASIC_REG_TYPE(SP_32) /* wsp */ \
287 BASIC_REG_TYPE(SP_64) /* sp */ \
288 BASIC_REG_TYPE(ZR_32) /* wzr */ \
289 BASIC_REG_TYPE(ZR_64) /* xzr */ \
290 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
291 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
292 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
293 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
294 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
295 BASIC_REG_TYPE(V) /* v[0-31] */ \
296 BASIC_REG_TYPE(Z) /* z[0-31] */ \
297 BASIC_REG_TYPE(P) /* p[0-15] */ \
298 BASIC_REG_TYPE(PN) /* pn[0-15] */ \
299 BASIC_REG_TYPE(ZA) /* za */ \
300 BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
301 BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
302 BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
303 BASIC_REG_TYPE(ZT0) /* zt0 */ \
304 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
305 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
306 /* Typecheck: same, plus SVE registers. */ \
307 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
308 | REG_TYPE(Z)) \
309 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
310 MULTI_REG_TYPE(R_ZR, REG_TYPE(R_32) | REG_TYPE(R_64) \
311 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
312 /* Typecheck: same, plus SVE registers. */ \
313 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
314 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) \
315 | REG_TYPE(Z)) \
316 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
317 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
318 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
319 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
320 MULTI_REG_TYPE(R_ZR_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
321 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
322 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
323 /* Typecheck: any [BHSDQ]P FP. */ \
324 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
325 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
326 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
327 MULTI_REG_TYPE(R_ZR_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
328 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
329 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
330 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
331 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
332 be used for SVE instructions, since Zn and Pn are valid symbols \
333 in other contexts. */ \
334 MULTI_REG_TYPE(R_ZR_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
335 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
336 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
337 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
338 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
339 | REG_TYPE(Z) | REG_TYPE(P)) \
340 /* Likewise, but with predicate-as-counter registers added. */ \
341 MULTI_REG_TYPE(R_ZR_SP_BHSDQ_VZP_PN, REG_TYPE(R_32) | REG_TYPE(R_64) \
342 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
343 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
344 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
345 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
346 | REG_TYPE(Z) | REG_TYPE(P) | REG_TYPE(PN)) \
347 /* Any integer register; used for error messages only. */ \
348 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
349 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
350 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
351 /* Any vector register. */ \
352 MULTI_REG_TYPE(VZ, REG_TYPE(V) | REG_TYPE(Z)) \
353 /* An SVE vector or predicate register. */ \
354 MULTI_REG_TYPE(ZP, REG_TYPE(Z) | REG_TYPE(P)) \
355 /* Any vector or predicate register. */ \
356 MULTI_REG_TYPE(VZP, REG_TYPE(V) | REG_TYPE(Z) | REG_TYPE(P)) \
357 /* The whole of ZA or a single tile. */ \
358 MULTI_REG_TYPE(ZA_ZAT, REG_TYPE(ZA) | REG_TYPE(ZAT)) \
359 /* A horizontal or vertical slice of a ZA tile. */ \
360 MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV)) \
361 /* Pseudo type to mark the end of the enumerator sequence. */ \
362 END_REG_TYPE(MAX)
363
364 #undef BASIC_REG_TYPE
365 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
366 #undef MULTI_REG_TYPE
367 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
368 #undef END_REG_TYPE
369 #define END_REG_TYPE(T) BASIC_REG_TYPE(T)
370
371 /* Register type enumerators. */
372 typedef enum aarch64_reg_type_
373 {
374 /* A list of REG_TYPE_*. */
375 AARCH64_REG_TYPES
376 } aarch64_reg_type;
377
378 #undef BASIC_REG_TYPE
379 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
380 #undef REG_TYPE
381 #define REG_TYPE(T) (1 << REG_TYPE_##T)
382 #undef MULTI_REG_TYPE
383 #define MULTI_REG_TYPE(T,V) V,
384 #undef END_REG_TYPE
385 #define END_REG_TYPE(T) 0
386
/* Structure for a hash table entry for a register.  */
typedef struct
{
  const char *name;		/* Register name as written in assembly.  */
  unsigned char number;		/* Register number.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;	/* REG_TYPE_* value.  */
  /* NOTE(review): presumably non-zero for registers predefined by GAS
     rather than user-defined aliases -- confirm against users.  */
  unsigned char builtin;
} reg_entry;
395
396 /* Values indexed by aarch64_reg_type to assist the type checking. */
397 static const unsigned reg_type_masks[] =
398 {
399 AARCH64_REG_TYPES
400 };
401
402 #undef BASIC_REG_TYPE
403 #undef REG_TYPE
404 #undef MULTI_REG_TYPE
405 #undef END_REG_TYPE
406 #undef AARCH64_REG_TYPES
407
408 /* We expected one of the registers in MASK to be specified. If a register
409 of some kind was specified, SEEN is a mask that contains that register,
410 otherwise it is zero.
411
412 If it is possible to provide a relatively pithy message that describes
413 the error exactly, return a string that does so, reporting the error
414 against "operand %d". Return null otherwise.
415
416 From a QoI perspective, any REG_TYPE_* that is passed as the first
417 argument to set_expected_reg_error should generally have its own message.
418 Providing messages for combinations of such REG_TYPE_*s can be useful if
419 it is possible to summarize the combination in a relatively natural way.
420 On the other hand, it seems better to avoid long lists of unrelated
421 things. */
422
static const char *
get_reg_expected_msg (unsigned int mask, unsigned int seen)
{
  /* First handle messages that use SEEN; these must come before the
     mask-only checks so that the more specific diagnosis wins.  */
  if ((mask & reg_type_masks[REG_TYPE_ZAT])
      && (seen & reg_type_masks[REG_TYPE_ZATHV]))
    return N_("expected an unsuffixed ZA tile at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_ZATHV])
      && (seen & reg_type_masks[REG_TYPE_ZAT]))
    return N_("missing horizontal or vertical suffix at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_ZA])
      && (seen & (reg_type_masks[REG_TYPE_ZAT]
		  | reg_type_masks[REG_TYPE_ZATHV])))
    return N_("expected 'za' rather than a ZA tile at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_PN])
      && (seen & reg_type_masks[REG_TYPE_P]))
    return N_("expected a predicate-as-counter rather than predicate-as-mask"
	      " register at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_P])
      && (seen & reg_type_masks[REG_TYPE_PN]))
    return N_("expected a predicate-as-mask rather than predicate-as-counter"
	      " register at operand %d");

  /* Integer, zero and stack registers.  */
  if (mask == reg_type_masks[REG_TYPE_R_64])
    return N_("expected a 64-bit integer register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_R_ZR])
    return N_("expected an integer or zero register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_R_SP])
    return N_("expected an integer or stack pointer register at operand %d");

  /* Floating-point and SIMD registers.  */
  if (mask == reg_type_masks[REG_TYPE_BHSDQ])
    return N_("expected a scalar SIMD or floating-point register"
	      " at operand %d");
  if (mask == reg_type_masks[REG_TYPE_V])
    return N_("expected an Advanced SIMD vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_Z])
    return N_("expected an SVE vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_P]
      || mask == (reg_type_masks[REG_TYPE_P] | reg_type_masks[REG_TYPE_PN]))
    /* Use this error for "predicate-as-mask only" and "either kind of
       predicate".  We report a more specific error if P is used where
       PN is expected, and vice versa, so the issue at this point is
       "predicate-like" vs. "not predicate-like".  */
    return N_("expected an SVE predicate register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_PN])
    return N_("expected an SVE predicate-as-counter register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_VZ])
    return N_("expected a vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZP])
    return N_("expected an SVE vector or predicate register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_VZP])
    return N_("expected a vector or predicate register at operand %d");

  /* SME-related registers.  */
  if (mask == reg_type_masks[REG_TYPE_ZA])
    return N_("expected a ZA array vector at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_ZA_ZAT] | reg_type_masks[REG_TYPE_ZT0]))
    return N_("expected ZT0 or a ZA mask at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZAT])
    return N_("expected a ZA tile at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZATHV])
    return N_("expected a ZA tile slice at operand %d");

  /* Integer and vector combos.  */
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_V]))
    return N_("expected an integer register or Advanced SIMD vector register"
	      " at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_Z]))
    return N_("expected an integer register or SVE vector register"
	      " at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZ]))
    return N_("expected an integer or vector register at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_P]))
    return N_("expected an integer or predicate register at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZP]))
    return N_("expected an integer, vector or predicate register"
	      " at operand %d");

  /* SVE and SME combos.  */
  if (mask == (reg_type_masks[REG_TYPE_Z] | reg_type_masks[REG_TYPE_ZATHV]))
    return N_("expected an SVE vector register or ZA tile slice"
	      " at operand %d");

  /* No pithy description for this combination; let the caller fall back
     to a generic message.  */
  return NULL;
}
514
515 /* Record that we expected a register of type TYPE but didn't see one.
516 REG is the register that we actually saw, or null if we didn't see a
517 recognized register. FLAGS is SEF_IN_REGLIST if we are parsing the
518 contents of a register list, otherwise it is zero. */
519
/* Record that we expected a register of type TYPE but didn't see one.
   REG is the register that we actually saw, or null if we didn't see a
   recognized register.  FLAGS is SEF_IN_REGLIST if we are parsing the
   contents of a register list, otherwise it is zero.  */
static inline void
set_expected_reg_error (aarch64_reg_type type, const reg_entry *reg,
			unsigned int flags)
{
  assert (flags == 0 || flags == SEF_IN_REGLIST);
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
  /* data[0] holds bare-operand expectations and data[1] register-list
     expectations; see the comment above SEF_DEFAULT_ERROR.  */
  if (flags & SEF_IN_REGLIST)
    inst.parsing_error.data[1].i = reg_type_masks[type] | flags;
  else
    inst.parsing_error.data[0].i = reg_type_masks[type];
  if (reg)
    inst.parsing_error.data[2].i = reg_type_masks[reg->type];
}
533
534 /* Record that we expected a register list containing registers of type TYPE,
535 but didn't see the opening '{'. If we saw a register instead, REG is the
536 register that we saw, otherwise it is null. */
537
/* Record that we expected a register list containing registers of type
   TYPE, but didn't see the opening '{'.  If we saw a register instead,
   REG is the register that we saw, otherwise it is null.  */
static inline void
set_expected_reglist_error (aarch64_reg_type type, const reg_entry *reg)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
  /* Register-list expectations go in data[1]; the register actually
     seen (if any) in data[2].  */
  inst.parsing_error.data[1].i = reg_type_masks[type];
  if (reg)
    inst.parsing_error.data[2].i = reg_type_masks[reg->type];
}
546
547 /* Some well known registers that we refer to directly elsewhere. */
548 #define REG_SP 31
549 #define REG_ZR 31
550
551 /* Instructions take 4 bytes in the object file. */
552 #define INSN_SIZE 4
553
554 static htab_t aarch64_ops_hsh;
555 static htab_t aarch64_cond_hsh;
556 static htab_t aarch64_shift_hsh;
557 static htab_t aarch64_sys_regs_hsh;
558 static htab_t aarch64_pstatefield_hsh;
559 static htab_t aarch64_sys_regs_ic_hsh;
560 static htab_t aarch64_sys_regs_dc_hsh;
561 static htab_t aarch64_sys_regs_at_hsh;
562 static htab_t aarch64_sys_regs_tlbi_hsh;
563 static htab_t aarch64_sys_regs_sr_hsh;
564 static htab_t aarch64_reg_hsh;
565 static htab_t aarch64_barrier_opt_hsh;
566 static htab_t aarch64_nzcv_hsh;
567 static htab_t aarch64_pldop_hsh;
568 static htab_t aarch64_hint_opt_hsh;
569
570 /* Stuff needed to resolve the label ambiguity
571 As:
572 ...
573 label: <insn>
574 may differ from:
575 ...
576 label:
577 <insn> */
578
579 static symbolS *last_label_seen;
580
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;

typedef struct literal_pool
{
  literal_expression literals[MAX_LITERAL_POOL_SIZE];
  unsigned int next_free_entry;	/* Next unused slot in LITERALS.  */
  unsigned int id;		/* Identifier of this pool.  */
  symbolS *symbol;		/* Symbol marking the pool's location.  */
  segT section;			/* Owning section.  */
  subsegT sub_section;		/* Owning subsection.  */
  int size;
  struct literal_pool *next;	/* Next pool in the global list.  */
} literal_pool;
603
604 /* Pointer to a linked list of literal pools. */
605 static literal_pool *list_of_pools = NULL;
606 \f
607 /* Pure syntax. */
608
609 /* This array holds the chars that always start a comment. If the
610 pre-processor is disabled, these aren't very useful. */
611 const char comment_chars[] = "";
612
613 /* This array holds the chars that only start a comment at the beginning of
614 a line. If the line seems to have the form '# 123 filename'
615 .line and .file directives will appear in the pre-processed output. */
616 /* Note that input_file.c hand checks for '#' at the beginning of the
617 first line of the input file. This is because the compiler outputs
618 #NO_APP at the beginning of its output. */
619 /* Also note that comments like this one will always work. */
620 const char line_comment_chars[] = "#";
621
622 const char line_separator_chars[] = ";";
623
624 /* Chars that can be used to separate mant
625 from exp in floating point numbers. */
626 const char EXP_CHARS[] = "eE";
627
628 /* Chars that mean this number is a floating point constant. */
629 /* As in 0f12.456 */
630 /* or 0d1.2345e12 */
631
632 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
633
634 /* Prefix character that indicates the start of an immediate value. */
635 #define is_immediate_prefix(C) ((C) == '#')
636
637 /* Separator character handling. */
638
639 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
640
/* If *STR currently points at the character C, consume it and return
   TRUE; otherwise leave *STR unchanged and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
652
653 #define skip_past_comma(str) skip_past_char (str, ',')
654
655 /* Arithmetic expressions (possibly involving symbols). */
656
657 static bool in_aarch64_get_expression = false;
658
659 /* Third argument to aarch64_get_expression. */
660 #define GE_NO_PREFIX false
661 #define GE_OPT_PREFIX true
662
663 /* Fourth argument to aarch64_get_expression. */
664 #define ALLOW_ABSENT false
665 #define REJECT_ABSENT true
666
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.  */

static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () works on input_line_pointer, so temporarily redirect
     it to *STR; in_aarch64_get_expression tells md_operand to flag bad
     operands with O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  /* Success: advance the caller's pointer past the expression and
     restore input_line_pointer.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
733
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Defer to the generic IEEE helper.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
744
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * exp)
{
  /* Only flag the failure when called from within aarch64_get_expression,
     which turns O_illegal into a syntax error.  */
  if (in_aarch64_get_expression)
    exp->X_op = O_illegal;
}
753
754 /* Immediate values. */
755
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful.  Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *error)
{
  /* Keep any diagnostic that is already recorded.  */
  if (! error_p ())
    set_syntax_error (error);
}
767
/* Similar to first_error, but this function accepts a formatted error
   message.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message is expected to fit within the buffer.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
792
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type, indexed by vector_el_type.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predicate "/z" and "/m" suffixes map directly.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit arrangements are representable.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
867
868 /* Register parsing. */
869
/* Generic register parser which is called by other specialized
   register parsers.
   CCP points to what should be the beginning of a register name.
   If it is indeed a valid register name, advance CCP over it and
   return the reg_entry structure; otherwise return NULL.
   It does not issue diagnostics.  */

static reg_entry *
parse_reg (char **ccp)
{
  char *start = *ccp;
  char *p;
  reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif

  /* A register name starts with a letter and continues with letters,
     digits and underscores.  */
  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  /* Only consume the input once we know it names a register.  */
  *ccp = p;
  return reg;
}
906
/* Return the operand qualifier associated with all uses of REG, or
   AARCH64_OPND_QLF_NIL if none.  AARCH64_OPND_QLF_NIL means either
   that qualifiers don't apply to REG or that qualifiers are added
   using suffixes.  */

static aarch64_opnd_qualifier_t
inherent_reg_qualifier (const reg_entry *reg)
{
  switch (reg->type)
    {
    /* 32-bit general-purpose, stack and zero registers.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_ZR_32:
      return AARCH64_OPND_QLF_W;

    /* 64-bit counterparts.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_ZR_64:
      return AARCH64_OPND_QLF_X;

    /* Scalar FP/SIMD registers; REG_TYPE_FP_[BHSDQ] are kept
       consecutive, mirroring AARCH64_OPND_QLF_S_[BHSDQ].  */
    case REG_TYPE_FP_B:
    case REG_TYPE_FP_H:
    case REG_TYPE_FP_S:
    case REG_TYPE_FP_D:
    case REG_TYPE_FP_Q:
      return AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);

    default:
      return AARCH64_OPND_QLF_NIL;
    }
}
938
/* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
   return FALSE.  */
static bool
aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
{
  /* reg_type_masks[TYPE] is the set of basic register types that
     qualify as TYPE; test REG's own type bit against it.  */
  return (reg_type_masks[type] & (1 << reg->type)) != 0;
}
946
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    case REG_TYPE_Z:
      /* An SVE Zn register is only accepted if REG_TYPE allows it,
	 and it must carry an explicit ".s" or ".d" suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_Z)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the two-character suffix.  */
      str += 2;
      break;

    default:
      /* Otherwise only general-purpose registers (including SP and ZR)
	 are acceptable here.  */
      if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR_SP))
	return NULL;
      *qualifier = inherent_reg_qualifier (reg);
      break;
    }

  *ccp = str;

  return reg;
}
995
996 /* Try to parse a base or offset register. Return the register entry
997 on success, setting *QUALIFIER to the register qualifier. Return null
998 otherwise.
999
1000 Note that this function does not issue any diagnostics. */
1001
1002 static const reg_entry *
1003 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
1004 {
1005 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_ZR_SP, qualifier);
1006 }
1007
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* Only V registers may carry a leading element count; for every
     other register type go straight to the element-size letter.  */
  if (reg_type != REG_TYPE_V || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  /* Parse the element count and validate it against the set that can
     appear in any legal arrangement specifier.  */
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only valid without a count, or as the 1q arrangement of
	 a V register; otherwise fall through to the error below.  */
      if (reg_type != REG_TYPE_V || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* A counted arrangement must describe a 64-bit or 128-bit vector,
     with 2h and 4b additionally allowed as half-width arrangements.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;
  parsed_type->element_size = element_size;

  *str = ptr;

  return true;
}
1094
1095 /* *STR contains an SVE zero/merge predication suffix. Parse it into
1096 *PARSED_TYPE and point *STR at the end of the suffix. */
1097
1098 static bool
1099 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
1100 {
1101 char *ptr = *str;
1102
1103 /* Skip '/'. */
1104 gas_assert (*ptr == '/');
1105 ptr++;
1106 switch (TOLOWER (*ptr))
1107 {
1108 case 'z':
1109 parsed_type->type = NT_zero;
1110 break;
1111 case 'm':
1112 parsed_type->type = NT_merge;
1113 break;
1114 default:
1115 if (*ptr != '\0' && *ptr != ',')
1116 first_error_fmt (_("unexpected character `%c' in predication type"),
1117 *ptr);
1118 else
1119 first_error (_("missing predication type"));
1120 return false;
1121 }
1122 parsed_type->width = 0;
1123 *str = ptr + 1;
1124 return true;
1125 }
1126
1127 /* Return true if CH is a valid suffix character for registers of
1128 type TYPE. */
1129
1130 static bool
1131 aarch64_valid_suffix_char_p (aarch64_reg_type type, char ch)
1132 {
1133 switch (type)
1134 {
1135 case REG_TYPE_V:
1136 case REG_TYPE_Z:
1137 case REG_TYPE_ZA:
1138 case REG_TYPE_ZAT:
1139 case REG_TYPE_ZATH:
1140 case REG_TYPE_ZATV:
1141 return ch == '.';
1142
1143 case REG_TYPE_P:
1144 case REG_TYPE_PN:
1145 return ch == '.' || ch == '/';
1146
1147 default:
1148 return false;
1149 }
1150 }
1151
1152 /* Parse an index expression at *STR, storing it in *IMM on success. */
1153
1154 static bool
1155 parse_index_expression (char **str, int64_t *imm)
1156 {
1157 expressionS exp;
1158
1159 aarch64_get_expression (&exp, str, GE_NO_PREFIX, REJECT_ABSENT);
1160 if (exp.X_op != O_constant)
1161 {
1162 first_error (_("constant expression required"));
1163 return false;
1164 }
1165 *imm = exp.X_add_number;
1166 return true;
1167 }
1168
/* Parse a register of the type TYPE.

   Return null if the string pointed to by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register, and optionally return the register
   shape and element index information in *TYPEINFO.

   FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list.

   FLAGS includes PTR_FULL_REG if the function should ignore any potential
   register index.

   FLAGS includes PTR_GOOD_MATCH if we are sufficiently far into parsing
   an operand that we can be confident that it is a good match.  */

#define PTR_IN_REGLIST (1U << 0)
#define PTR_FULL_REG (1U << 1)
#define PTR_GOOD_MATCH (1U << 2)

static const reg_entry *
parse_typed_reg (char **ccp, aarch64_reg_type type,
		 struct vector_type_el *typeinfo, unsigned int flags)
{
  char *str = *ccp;
  /* Remember whether the input even looked like an identifier, so a
     better diagnostic can be chosen if parsing fails below.  */
  bool is_alpha = ISALPHA (*str);
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;
  unsigned int err_flags = (flags & PTR_IN_REGLIST) ? SEF_IN_REGLIST : 0;

  /* Start from an empty shape/index description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.element_size = 0;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      /* Pick the most informative error for the failed parse.  */
      if (!is_alpha && (flags & PTR_IN_REGLIST))
	set_fatal_syntax_error (_("syntax error in register list"));
      else if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }
  /* Narrow TYPE from the requested class to the register's own type.  */
  type = reg->type;

  if (aarch64_valid_suffix_char_p (reg->type, *str))
    {
      if (*str == '.')
	{
	  /* '.' introduces an arrangement specifier.  */
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return NULL;
	  /* ZA tiles: the tile number must fit the element size, e.g.
	     only za0 exists for .b, za0-za1 for .h, and so on.  */
	  if ((reg->type == REG_TYPE_ZAT
	       || reg->type == REG_TYPE_ZATH
	       || reg->type == REG_TYPE_ZATV)
	      && reg->number * 8U >= parsetype.element_size)
	    {
	      set_syntax_error (_("ZA tile number out of range"));
	      return NULL;
	    }
	}
      else
	{
	  /* '/' introduces an SVE zero/merge predication suffix.  */
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return NULL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type != REG_TYPE_V)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
    {
      /* Reject Sn[index] syntax.  */
      if (reg->type != REG_TYPE_Z
	  && reg->type != REG_TYPE_PN
	  && reg->type != REG_TYPE_ZT0
	  && !is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return NULL;
	}

      if (flags & PTR_IN_REGLIST)
	{
	  first_error (_("index not allowed inside register list"));
	  return NULL;
	}

      atype.defined |= NTA_HASINDEX;

      if (!parse_index_expression (&str, &atype.index))
	return NULL;

      if (! skip_past_char (&str, ']'))
	return NULL;
    }
  /* A vector register whose arrangement implied an index (see above)
     must actually have one, except inside a register list.  */
  else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return NULL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_V && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  *ccp = str;

  return reg;
}
1321
1322 /* Parse register.
1323
1324 Return the register on success; return null otherwise.
1325
1326 If this is a NEON vector register with additional type information, fill
1327 in the struct pointed to by VECTYPE (if non-NULL).
1328
1329 This parser does not handle register lists. */
1330
1331 static const reg_entry *
1332 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1333 struct vector_type_el *vectype)
1334 {
1335 return parse_typed_reg (ccp, type, vectype, 0);
1336 }
1337
1338 static inline bool
1339 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1340 {
1341 return (e1.type == e2.type
1342 && e1.defined == e2.defined
1343 && e1.width == e2.width
1344 && e1.element_size == e2.element_size
1345 && e1.index == e2.index);
1346 }
1347
1348 /* Return the register number mask for registers of type REG_TYPE. */
1349
1350 static inline int
1351 reg_type_mask (aarch64_reg_type reg_type)
1352 {
1353 return reg_type == REG_TYPE_P ? 15 : 31;
1354 }
1355
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |  7-11  |  2-6   |  0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  uint32_t val, val_range, mask;
  int in_range;
  int ret_val;
  bool error = false;
  bool expect_index = false;
  unsigned int ptr_flags = PTR_IN_REGLIST;

  if (*str != '{')
    {
      set_expected_reglist_error (type, parse_reg (&str));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.element_size = 0;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1u;
  val_range = -1u;
  in_range = 0;
  /* Register numbers wrap modulo the register-file size, so that e.g.
     { v31.4s - v1.4s } is a legal three-register range.  */
  mask = reg_type_mask (type);
  do
    {
      /* IN_RANGE is set by the loop condition below when a '-' was
	 seen: VAL then holds the low bound of the range.  */
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
					      ptr_flags);
      if (!reg)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      val = reg->number;
      /* reject [bhsd]n */
      if (type == REG_TYPE_V && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      /* If any element carries an index, a trailing [index] after the
	 closing brace is required (parsed further below).  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* A range must span at least two registers; advance past the
	     low bound so the expansion loop below emits the rest.  */
	  if (val == val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range = (val_range + 1) & mask;
	}
      else
	{
	  val_range = val;
	  /* All list elements must share the first element's shape.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Append VAL_RANGE..VAL (wrapping modulo MASK+1) to the encoded
	 result, 5 bits per register number.  */
      if (! error)
	for (;;)
	  {
	    ret_val |= val_range << ((5 * nb_regs) & 31);
	    nb_regs++;
	    if (val_range == val)
	      break;
	    val_range = (val_range + 1) & mask;
	  }
      in_range = 0;
      /* From here on we are clearly parsing a register list, so later
	 failures should produce hard errors rather than backtracking.  */
      ptr_flags |= PTR_GOOD_MATCH;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  if (!parse_index_expression (&str, &typeinfo_first.index))
	    error = true;
	  if (! skip_past_char (&str, ']'))
	    error = true;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1515
1516 /* Directives: register aliases. */
1517
1518 static reg_entry *
1519 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1520 {
1521 reg_entry *new;
1522 const char *name;
1523
1524 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1525 {
1526 if (new->builtin)
1527 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1528 str);
1529
1530 /* Only warn about a redefinition if it's not defined as the
1531 same register. */
1532 else if (new->number != number || new->type != type)
1533 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1534
1535 return NULL;
1536 }
1537
1538 name = xstrdup (str);
1539 new = XNEW (reg_entry);
1540
1541 new->name = name;
1542 new->number = number;
1543 new->type = type;
1544 new->builtin = false;
1545
1546 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1547
1548 return new;
1549 }
1550
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Still return true: this was recognisably a .req, so the caller
	 should not try to process the line any further.  */
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Skip the upper-case insertion if the stated name was already
	 all upper case.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	     foo .req r0
	     Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise skip the lower-case insertion if it would duplicate
	 the stated name.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1630
/* Handler for a ".req" pseudo-op appearing at the start of a line.
   Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line; reaching here means
   the directive was mis-placed, so diagnose it.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1638
1639 /* The .unreq directive deletes an alias which was previously defined
1640 by .req. For example:
1641
1642 my_alias .req r11
1643 .unreq my_alias */
1644
1645 static void
1646 s_unreq (int a ATTRIBUTE_UNUSED)
1647 {
1648 char *name;
1649 char saved_char;
1650
1651 name = input_line_pointer;
1652 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1653 saved_char = *input_line_pointer;
1654 *input_line_pointer = 0;
1655
1656 if (!*name)
1657 as_bad (_("invalid syntax for .unreq directive"));
1658 else
1659 {
1660 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1661
1662 if (!reg)
1663 as_bad (_("unknown register alias '%s'"), name);
1664 else if (reg->builtin)
1665 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1666 name);
1667 else
1668 {
1669 char *p;
1670 char *nbuf;
1671
1672 str_hash_delete (aarch64_reg_hsh, name);
1673 free ((char *) reg->name);
1674 free (reg);
1675
1676 /* Also locate the all upper case and all lower case versions.
1677 Do not complain if we cannot find one or the other as it
1678 was probably deleted above. */
1679
1680 nbuf = strdup (name);
1681 for (p = nbuf; *p; p++)
1682 *p = TOUPPER (*p);
1683 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1684 if (reg)
1685 {
1686 str_hash_delete (aarch64_reg_hsh, nbuf);
1687 free ((char *) reg->name);
1688 free (reg);
1689 }
1690
1691 for (p = nbuf; *p; p++)
1692 *p = TOLOWER (*p);
1693 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1694 if (reg)
1695 {
1696 str_hash_delete (aarch64_reg_hsh, nbuf);
1697 free ((char *) reg->name);
1698 free (reg);
1699 }
1700
1701 free (nbuf);
1702 }
1703 }
1704
1705 *input_line_pointer = saved_char;
1706 demand_empty_rest_of_line ();
1707 }
1708
1709 /* Directives: Instruction set selection. */
1710
1711 #if defined OBJ_ELF || defined OBJ_COFF
1712 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1713 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1714 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1715 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1716
/* Create a new mapping symbol for the transition to STATE, placed at
   offset VALUE within FRAG.  STATE selects the symbol name: "$d" for
   data, "$x" for A64 instructions.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Remove the superseded symbol from the global symbol chain.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* An existing symbol at the same offset is replaced by the new one.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1772
1773 /* We must sometimes convert a region marked as code to data during
1774 code alignment, if an odd number of bytes have to be padded. The
1775 code mapping symbol is pushed to an aligned address. */
1776
1777 static void
1778 insert_data_mapping_symbol (enum mstate state,
1779 valueT value, fragS * frag, offsetT bytes)
1780 {
1781 /* If there was already a mapping symbol, remove it. */
1782 if (frag->tc_frag_data.last_map != NULL
1783 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1784 frag->fr_address + value)
1785 {
1786 symbolS *symp = frag->tc_frag_data.last_map;
1787
1788 if (value == 0)
1789 {
1790 know (frag->tc_frag_data.first_map == symp);
1791 frag->tc_frag_data.first_map = NULL;
1792 }
1793 frag->tc_frag_data.last_map = NULL;
1794 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1795 }
1796
1797 make_mapping_symbol (MAP_DATA, value, frag);
1798 make_mapping_symbol (state, value + bytes, frag);
1799 }
1800
1801 static void mapping_state_2 (enum mstate state, int max_chars);
1802
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* Everything up to here was data, so mark the section start as
	 such before the instruction mapping symbol is emitted below.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1844
1845 /* Same as mapping_state, but MAX_CHARS bytes have already been
1846 allocated. Put the mapping symbol that far back. */
1847
1848 static void
1849 mapping_state_2 (enum mstate state, int max_chars)
1850 {
1851 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1852
1853 if (!SEG_NORMAL (now_seg))
1854 return;
1855
1856 if (mapstate == state)
1857 /* The mapping symbol has already been emitted.
1858 There is nothing else to do. */
1859 return;
1860
1861 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1862 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1863 }
1864 #else
1865 #define mapping_state(x) /* nothing */
1866 #define mapping_state_2(x, y) /* nothing */
1867 #endif
1868
1869 /* Directives: alignment. */
1870
1871 static void
1872 s_even (int ignore ATTRIBUTE_UNUSED)
1873 {
1874 /* Never make frag if expect extra pass. */
1875 if (!need_pass_2)
1876 frag_align (1, 0, 0);
1877
1878 record_alignment (now_seg, 1);
1879
1880 demand_empty_rest_of_line ();
1881 }
1882
1883 /* Directives: Literal pools. */
1884
1885 static literal_pool *
1886 find_literal_pool (int size)
1887 {
1888 literal_pool *pool;
1889
1890 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1891 {
1892 if (pool->section == now_seg
1893 && pool->sub_section == now_subseg && pool->size == size)
1894 break;
1895 }
1896
1897 return pool;
1898 }
1899
1900 static literal_pool *
1901 find_or_make_literal_pool (int size)
1902 {
1903 /* Next literal pool ID number. */
1904 static unsigned int latest_pool_num = 1;
1905 literal_pool *pool;
1906
1907 pool = find_literal_pool (size);
1908
1909 if (pool == NULL)
1910 {
1911 /* Create a new pool. */
1912 pool = XNEW (literal_pool);
1913 if (!pool)
1914 return NULL;
1915
1916 /* Currently we always put the literal pool in the current text
1917 section. If we were generating "small" model code where we
1918 knew that all code and initialised data was within 1MB then
1919 we could output literals to mergeable, read-only data
1920 sections. */
1921
1922 pool->next_free_entry = 0;
1923 pool->section = now_seg;
1924 pool->sub_section = now_subseg;
1925 pool->size = size;
1926 pool->next = list_of_pools;
1927 pool->symbol = NULL;
1928
1929 /* Add it to the list. */
1930 list_of_pools = pool;
1931 }
1932
1933 /* New pools, and emptied pools, will have a NULL symbol. */
1934 if (pool->symbol == NULL)
1935 {
1936 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1937 &zero_address_frag, 0);
1938 pool->id = latest_pool_num++;
1939 }
1940
1941 /* Done. */
1942 return pool;
1943 }
1944
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   On success, rewrite *EXP to refer to the pool entry (symbol + offset)
   and return TRUE; otherwise return FALSE.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool, so equal
     constants and equal symbol references share one entry.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Redirect the caller's expression at the pool entry: the pool
     symbol plus the byte offset of this entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
2004
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.
   Give the pre-created SYMBOLP its NAME, SEGMENT, value VALU and
   owning FRAG, then append it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME onto the notes obstack so the symbol owns its storage.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* It is too late to add symbols once the table has been frozen.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  /* Let the object format and target attach their own data.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
2055
2056
/* Implement the .ltorg directive: dump every non-empty literal pool
   (4-byte, 8-byte and 16-byte entry sizes) into the current section,
   defining each pool's symbol at the dumped location.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Walk the possible entry sizes: 2^2 = 4, 2^3 = 8, 2^4 = 16 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 in the name keeps the pool symbol out of the way of
	 user-level identifiers.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Pin the pool symbol to the current location and publish it.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
2115
2116 #if defined(OBJ_ELF) || defined(OBJ_COFF)
2117 /* Forward declarations for functions below, in the MD interface
2118 section. */
2119 static struct reloc_table_entry * find_reloc_table_entry (char **);
2120
2121 /* Directives: Data. */
2122 /* N.B. the support for relocation suffix in this directive needs to be
2123 implemented properly. */
2124
2125 static void
2126 s_aarch64_cons (int nbytes)
2127 {
2128 expressionS exp;
2129
2130 #ifdef md_flush_pending_output
2131 md_flush_pending_output ();
2132 #endif
2133
2134 if (is_it_end_of_statement ())
2135 {
2136 demand_empty_rest_of_line ();
2137 return;
2138 }
2139
2140 #ifdef md_cons_align
2141 md_cons_align (nbytes);
2142 #endif
2143
2144 mapping_state (MAP_DATA);
2145 do
2146 {
2147 struct reloc_table_entry *reloc;
2148
2149 expression (&exp);
2150
2151 if (exp.X_op != O_symbol)
2152 emit_expr (&exp, (unsigned int) nbytes);
2153 else
2154 {
2155 skip_past_char (&input_line_pointer, '#');
2156 if (skip_past_char (&input_line_pointer, ':'))
2157 {
2158 reloc = find_reloc_table_entry (&input_line_pointer);
2159 if (reloc == NULL)
2160 as_bad (_("unrecognized relocation suffix"));
2161 else
2162 as_bad (_("unimplemented relocation suffix"));
2163 ignore_rest_of_line ();
2164 return;
2165 }
2166 else
2167 emit_expr (&exp, (unsigned int) nbytes);
2168 }
2169 }
2170 while (*input_line_pointer++ == ',');
2171
2172 /* Put terminator back into stream. */
2173 input_line_pointer--;
2174 demand_empty_rest_of_line ();
2175 }
2176 #endif
2177
2178 #ifdef OBJ_ELF
2179 /* Forward declarations for functions below, in the MD interface
2180 section. */
2181 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
2182
2183 /* Mark symbol that it follows a variant PCS convention. */
2184
2185 static void
2186 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
2187 {
2188 char *name;
2189 char c;
2190 symbolS *sym;
2191 asymbol *bfdsym;
2192 elf_symbol_type *elfsym;
2193
2194 c = get_symbol_name (&name);
2195 if (!*name)
2196 as_bad (_("Missing symbol name in directive"));
2197 sym = symbol_find_or_make (name);
2198 restore_line_pointer (c);
2199 demand_empty_rest_of_line ();
2200 bfdsym = symbol_get_bfdsym (sym);
2201 elfsym = elf_symbol_from (bfdsym);
2202 gas_assert (elfsym);
2203 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2204 }
2205 #endif /* OBJ_ELF */
2206
/* Output a 32-bit word, but mark as an instruction.  Implements the
   ".inst" directive: each comma-separated constant expression is emitted
   as one instruction word, byte-swapped on big-endian hosts so the
   stored encoding is little-endian.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;	/* Count of instruction words emitted.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* The value was parsed in host order; swap so the emitted bytes
	 are little-endian on a big-endian target.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Record the emitted bytes for DWARF line-number information.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2263
2264 static void
2265 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2266 {
2267 demand_empty_rest_of_line ();
2268 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2269 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2270 }
2271
2272 #ifdef OBJ_ELF
2273 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2274
2275 static void
2276 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2277 {
2278 expressionS exp;
2279
2280 expression (&exp);
2281 frag_grow (4);
2282 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2283 BFD_RELOC_AARCH64_TLSDESC_ADD);
2284
2285 demand_empty_rest_of_line ();
2286 }
2287
2288 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2289
2290 static void
2291 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2292 {
2293 expressionS exp;
2294
2295 /* Since we're just labelling the code, there's no need to define a
2296 mapping symbol. */
2297 expression (&exp);
2298 /* Make sure there is enough room in this frag for the following
2299 blr. This trick only works if the blr follows immediately after
2300 the .tlsdesc directive. */
2301 frag_grow (4);
2302 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2303 BFD_RELOC_AARCH64_TLSDESC_CALL);
2304
2305 demand_empty_rest_of_line ();
2306 }
2307
2308 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2309
2310 static void
2311 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2312 {
2313 expressionS exp;
2314
2315 expression (&exp);
2316 frag_grow (4);
2317 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2318 BFD_RELOC_AARCH64_TLSDESC_LDR);
2319
2320 demand_empty_rest_of_line ();
2321 }
2322 #endif /* OBJ_ELF */
2323
2324 #ifdef TE_PE
2325 static void
2326 s_secrel (int dummy ATTRIBUTE_UNUSED)
2327 {
2328 expressionS exp;
2329
2330 do
2331 {
2332 expression (&exp);
2333 if (exp.X_op == O_symbol)
2334 exp.X_op = O_secrel;
2335
2336 emit_expr (&exp, 4);
2337 }
2338 while (*input_line_pointer++ == ',');
2339
2340 input_line_pointer--;
2341 demand_empty_rest_of_line ();
2342 }
2343
/* Emit a SIZE-byte section-relative (O_secrel) reference to SYMBOL.
   Used by the DWARF machinery on PE targets, where debug-info offsets
   are section-relative rather than absolute.  */

void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  /* NOTE(review): only these three fields are initialized; this assumes
     emit_expr reads nothing else from the expression for O_secrel —
     confirm before adding fields or changing emit_expr.  */
  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
2354
2355 static void
2356 s_secidx (int dummy ATTRIBUTE_UNUSED)
2357 {
2358 expressionS exp;
2359
2360 do
2361 {
2362 expression (&exp);
2363 if (exp.X_op == O_symbol)
2364 exp.X_op = O_secidx;
2365
2366 emit_expr (&exp, 2);
2367 }
2368 while (*input_line_pointer++ == ',');
2369
2370 input_line_pointer--;
2371 demand_empty_rest_of_line ();
2372 }
2373 #endif /* TE_PE */
2374
2375 static void s_aarch64_arch (int);
2376 static void s_aarch64_cpu (int);
2377 static void s_aarch64_arch_extension (int);
2378
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"even", s_even, 0},
  /* Literal-pool placement; ".pool" is an alias for ".ltorg".  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Target CPU / architecture / extension selection.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  /* TLS-descriptor relaxation markers and variant-PCS marking.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* Data directives that understand relocation suffixes; the integer
     argument is the item size in bytes.  */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  /* PE-specific section-relative data directives.  */
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  /* Half-precision and brain floating-point literal directives; the
     character argument selects the float_cons format.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2417 \f
2418
2419 /* Check whether STR points to a register name followed by a comma or the
2420 end of line; REG_TYPE indicates which register types are checked
2421 against. Return TRUE if STR is such a register name; otherwise return
2422 FALSE. The function does not intend to produce any diagnostics, but since
2423 the register parser aarch64_reg_parse, which is called by this function,
2424 does produce diagnostics, we call clear_error to clear any diagnostics
2425 that may be generated by aarch64_reg_parse.
2426 Also, the function returns FALSE directly if there is any user error
2427 present at the function entry. This prevents the existing diagnostics
2428 state from being spoiled.
2429 The function currently serves parse_constant_immediate and
2430 parse_big_immediate only. */
2431 static bool
2432 reg_name_p (char *str, aarch64_reg_type reg_type)
2433 {
2434 const reg_entry *reg;
2435
2436 /* Prevent the diagnostics state from being spoiled. */
2437 if (error_p ())
2438 return false;
2439
2440 reg = aarch64_reg_parse (&str, reg_type, NULL);
2441
2442 /* Clear the parsing error that may be set by the reg parser. */
2443 clear_error ();
2444
2445 if (!reg)
2446 return false;
2447
2448 skip_whitespace (str);
2449 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2450 return true;
2451
2452 return false;
2453 }
2454
2455 /* Parser functions used exclusively in instruction operands. */
2456
2457 /* Parse an immediate expression which may not be constant.
2458
2459 To prevent the expression parser from pushing a register name
2460 into the symbol table as an undefined symbol, firstly a check is
2461 done to find out whether STR is a register of type REG_TYPE followed
2462 by a comma or the end of line. Return FALSE if STR is such a string. */
2463
2464 static bool
2465 parse_immediate_expression (char **str, expressionS *exp,
2466 aarch64_reg_type reg_type)
2467 {
2468 if (reg_name_p (*str, reg_type))
2469 {
2470 set_recoverable_error (_("immediate operand required"));
2471 return false;
2472 }
2473
2474 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2475
2476 if (exp->X_op == O_absent)
2477 {
2478 set_fatal_syntax_error (_("missing immediate expression"));
2479 return false;
2480 }
2481
2482 return true;
2483 }
2484
2485 /* Constant immediate-value read function for use in insn parsing.
2486 STR points to the beginning of the immediate (with the optional
2487 leading #); *VAL receives the value. REG_TYPE says which register
2488 names should be treated as registers rather than as symbolic immediates.
2489
2490 Return TRUE on success; otherwise return FALSE. */
2491
2492 static bool
2493 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2494 {
2495 expressionS exp;
2496
2497 if (! parse_immediate_expression (str, &exp, reg_type))
2498 return false;
2499
2500 if (exp.X_op != O_constant)
2501 {
2502 set_syntax_error (_("constant expression required"));
2503 return false;
2504 }
2505
2506 *val = exp.X_add_number;
2507 return true;
2508 }
2509
/* Pack the AArch64 8-bit floating-point immediate fields out of a
   32-bit IEEE single-precision word: bits [25:19] become the low seven
   bits, and the sign bit [31] becomes bit 7.  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0]  */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31]    -> b[7]    */
  return low7 | sign;
}
2516
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.

   Such a value has the bit pattern

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

   where n, e and each x are either 0 or 1 independently, with E == ~e.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* Expected value of bits 25-30: five copies of e with E == ~e in
     front, derived from bit 30.  */
  uint32_t expected = ((imm >> 30) & 0x1) == 0 ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)	/* Lower 19 bits must all be zero.  */
    return false;

  return (imm & 0x7e000000) == expected;
}
2549
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision; store the resulting
   single-precision word in *FPWORD if so.

   A convertible double has the bit pattern

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
     if Eeee_eeee != 1111_1111

   where n, e, s and S are each 0 or 1 independently and ~ is the
   inverse of E.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  uint32_t hi = imm >> 32;
  uint32_t lo = (uint32_t) imm;
  /* Expected value of the three exponent bits below E, all equal to ~E;
     derived from bit 62 (E).  */
  uint32_t exp_pattern = ((hi >> 30) & 0x1) ? 0x40000000 : 0x38000000;

  /* Mantissa bits that do not fit in a float must be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* Check E~~~.  */
  if ((hi & 0x78000000) != exp_pattern)
    return false;

  /* Reject Eeee_eeee == 1111_1111: the exponent does not narrow.  */
  if ((hi & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (hi & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	    | ((hi << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	    | (lo >> 29);		/* 3 S bits.  */
  return true;
}
2597
2598 /* Return true if we should treat OPERAND as a double-precision
2599 floating-point operand rather than a single-precision one. */
2600 static bool
2601 double_precision_operand_p (const aarch64_opnd_info *operand)
2602 {
2603 /* Check for unsuffixed SVE registers, which are allowed
2604 for LDR and STR but not in instructions that require an
2605 immediate. We get better error messages if we arbitrarily
2606 pick one size, parse the immediate normally, and then
2607 report the match failure in the normal way. */
2608 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2609 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2610 }
2611
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* Skip the optional leading '#'.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit encoding to 32 bits; fails if precision
	     would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision encoding must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name here means the immediate was omitted.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Parse the decimal literal into single-precision littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2687
2688 /* Less-generic immediate-value read function with the possibility of loading
2689 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2690 instructions.
2691
2692 To prevent the expression parser from pushing a register name into the
2693 symbol table as an undefined symbol, a check is firstly done to find
2694 out whether STR is a register of type REG_TYPE followed by a comma or
2695 the end of line. Return FALSE if STR is such a register. */
2696
2697 static bool
2698 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2699 {
2700 char *ptr = *str;
2701
2702 if (reg_name_p (ptr, reg_type))
2703 {
2704 set_syntax_error (_("immediate operand required"));
2705 return false;
2706 }
2707
2708 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);
2709
2710 if (inst.reloc.exp.X_op == O_constant)
2711 *imm = inst.reloc.exp.X_add_number;
2712
2713 *str = ptr;
2714
2715 return true;
2716 }
2717
2718 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2719 if NEED_LIBOPCODES is non-zero, the fixup will need
2720 assistance from the libopcodes. */
2721
2722 static inline void
2723 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2724 const aarch64_opnd_info *operand,
2725 int need_libopcodes_p)
2726 {
2727 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2728 reloc->opnd = operand->type;
2729 if (need_libopcodes_p)
2730 reloc->need_libopcodes_p = 1;
2731 };
2732
2733 /* Return TRUE if the instruction needs to be fixed up later internally by
2734 the GAS; otherwise return FALSE. */
2735
2736 static inline bool
2737 aarch64_gas_internal_fixup_p (void)
2738 {
2739 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2740 }
2741
2742 /* Assign the immediate value to the relevant field in *OPERAND if
2743 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2744 needs an internal fixup in a later stage.
2745 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2746 IMM.VALUE that may get assigned with the constant. */
2747 static inline void
2748 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2749 aarch64_opnd_info *operand,
2750 int addr_off_p,
2751 int need_libopcodes_p,
2752 int skip_p)
2753 {
2754 if (reloc->exp.X_op == O_constant)
2755 {
2756 if (addr_off_p)
2757 operand->addr.offset.imm = reloc->exp.X_add_number;
2758 else
2759 operand->imm.value = reloc->exp.X_add_number;
2760 reloc->type = BFD_RELOC_UNUSED;
2761 }
2762 else
2763 {
2764 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2765 /* Tell libopcodes to ignore this operand or not. This is helpful
2766 when one of the operands needs to be fixed up later but we need
2767 libopcodes to check the other operands. */
2768 operand->skip = skip_p;
2769 }
2770 }
2771
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate. It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;	/* Modifier name, without the colons.  */
  int pc_rel;		/* Non-zero if the relocation is PC-relative.  */
  /* For each instruction context below, the BFD reloc to use when this
     modifier appears there, or 0 if the modifier is invalid there.  */
  bfd_reloc_code_real_type adr_type;	    /* ADR instruction.  */
  bfd_reloc_code_real_type adrp_type;	    /* ADRP instruction.  */
  bfd_reloc_code_real_type movw_type;	    /* MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;	    /* ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;	    /* Load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type; /* LDR (literal).  */
};
2791
2792 static struct reloc_table_entry reloc_table[] =
2793 {
2794 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2795 {"lo12", 0,
2796 0, /* adr_type */
2797 0,
2798 0,
2799 BFD_RELOC_AARCH64_ADD_LO12,
2800 BFD_RELOC_AARCH64_LDST_LO12,
2801 0},
2802
2803 /* Higher 21 bits of pc-relative page offset: ADRP */
2804 {"pg_hi21", 1,
2805 0, /* adr_type */
2806 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2807 0,
2808 0,
2809 0,
2810 0},
2811
2812 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2813 {"pg_hi21_nc", 1,
2814 0, /* adr_type */
2815 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2816 0,
2817 0,
2818 0,
2819 0},
2820
2821 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2822 {"abs_g0", 0,
2823 0, /* adr_type */
2824 0,
2825 BFD_RELOC_AARCH64_MOVW_G0,
2826 0,
2827 0,
2828 0},
2829
2830 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2831 {"abs_g0_s", 0,
2832 0, /* adr_type */
2833 0,
2834 BFD_RELOC_AARCH64_MOVW_G0_S,
2835 0,
2836 0,
2837 0},
2838
2839 /* Less significant bits 0-15 of address/value: MOVK, no check */
2840 {"abs_g0_nc", 0,
2841 0, /* adr_type */
2842 0,
2843 BFD_RELOC_AARCH64_MOVW_G0_NC,
2844 0,
2845 0,
2846 0},
2847
2848 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2849 {"abs_g1", 0,
2850 0, /* adr_type */
2851 0,
2852 BFD_RELOC_AARCH64_MOVW_G1,
2853 0,
2854 0,
2855 0},
2856
2857 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2858 {"abs_g1_s", 0,
2859 0, /* adr_type */
2860 0,
2861 BFD_RELOC_AARCH64_MOVW_G1_S,
2862 0,
2863 0,
2864 0},
2865
2866 /* Less significant bits 16-31 of address/value: MOVK, no check */
2867 {"abs_g1_nc", 0,
2868 0, /* adr_type */
2869 0,
2870 BFD_RELOC_AARCH64_MOVW_G1_NC,
2871 0,
2872 0,
2873 0},
2874
2875 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2876 {"abs_g2", 0,
2877 0, /* adr_type */
2878 0,
2879 BFD_RELOC_AARCH64_MOVW_G2,
2880 0,
2881 0,
2882 0},
2883
2884 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2885 {"abs_g2_s", 0,
2886 0, /* adr_type */
2887 0,
2888 BFD_RELOC_AARCH64_MOVW_G2_S,
2889 0,
2890 0,
2891 0},
2892
2893 /* Less significant bits 32-47 of address/value: MOVK, no check */
2894 {"abs_g2_nc", 0,
2895 0, /* adr_type */
2896 0,
2897 BFD_RELOC_AARCH64_MOVW_G2_NC,
2898 0,
2899 0,
2900 0},
2901
2902 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2903 {"abs_g3", 0,
2904 0, /* adr_type */
2905 0,
2906 BFD_RELOC_AARCH64_MOVW_G3,
2907 0,
2908 0,
2909 0},
2910
2911 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2912 {"prel_g0", 1,
2913 0, /* adr_type */
2914 0,
2915 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2916 0,
2917 0,
2918 0},
2919
2920 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2921 {"prel_g0_nc", 1,
2922 0, /* adr_type */
2923 0,
2924 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2925 0,
2926 0,
2927 0},
2928
2929 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2930 {"prel_g1", 1,
2931 0, /* adr_type */
2932 0,
2933 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2934 0,
2935 0,
2936 0},
2937
2938 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2939 {"prel_g1_nc", 1,
2940 0, /* adr_type */
2941 0,
2942 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2943 0,
2944 0,
2945 0},
2946
2947 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2948 {"prel_g2", 1,
2949 0, /* adr_type */
2950 0,
2951 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2952 0,
2953 0,
2954 0},
2955
2956 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2957 {"prel_g2_nc", 1,
2958 0, /* adr_type */
2959 0,
2960 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2961 0,
2962 0,
2963 0},
2964
2965 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2966 {"prel_g3", 1,
2967 0, /* adr_type */
2968 0,
2969 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2970 0,
2971 0,
2972 0},
2973
2974 /* Get to the page containing GOT entry for a symbol. */
2975 {"got", 1,
2976 0, /* adr_type */
2977 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2978 0,
2979 0,
2980 0,
2981 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2982
2983 /* 12 bit offset into the page containing GOT entry for that symbol. */
2984 {"got_lo12", 0,
2985 0, /* adr_type */
2986 0,
2987 0,
2988 0,
2989 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2990 0},
2991
2992 /* 0-15 bits of address/value: MOVk, no check. */
2993 {"gotoff_g0_nc", 0,
2994 0, /* adr_type */
2995 0,
2996 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2997 0,
2998 0,
2999 0},
3000
3001 /* Most significant bits 16-31 of address/value: MOVZ. */
3002 {"gotoff_g1", 0,
3003 0, /* adr_type */
3004 0,
3005 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
3006 0,
3007 0,
3008 0},
3009
3010 /* 15 bit offset into the page containing GOT entry for that symbol. */
3011 {"gotoff_lo15", 0,
3012 0, /* adr_type */
3013 0,
3014 0,
3015 0,
3016 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
3017 0},
3018
3019 /* Get to the page containing GOT TLS entry for a symbol */
3020 {"gottprel_g0_nc", 0,
3021 0, /* adr_type */
3022 0,
3023 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
3024 0,
3025 0,
3026 0},
3027
3028 /* Get to the page containing GOT TLS entry for a symbol */
3029 {"gottprel_g1", 0,
3030 0, /* adr_type */
3031 0,
3032 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
3033 0,
3034 0,
3035 0},
3036
3037 /* Get to the page containing GOT TLS entry for a symbol */
3038 {"tlsgd", 0,
3039 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
3040 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
3041 0,
3042 0,
3043 0,
3044 0},
3045
3046 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3047 {"tlsgd_lo12", 0,
3048 0, /* adr_type */
3049 0,
3050 0,
3051 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
3052 0,
3053 0},
3054
3055 /* Lower 16 bits address/value: MOVk. */
3056 {"tlsgd_g0_nc", 0,
3057 0, /* adr_type */
3058 0,
3059 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
3060 0,
3061 0,
3062 0},
3063
3064 /* Most significant bits 16-31 of address/value: MOVZ. */
3065 {"tlsgd_g1", 0,
3066 0, /* adr_type */
3067 0,
3068 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
3069 0,
3070 0,
3071 0},
3072
3073 /* Get to the page containing GOT TLS entry for a symbol */
3074 {"tlsdesc", 0,
3075 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
3076 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
3077 0,
3078 0,
3079 0,
3080 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
3081
3082 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3083 {"tlsdesc_lo12", 0,
3084 0, /* adr_type */
3085 0,
3086 0,
3087 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
3088 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
3089 0},
3090
3091 /* Get to the page containing GOT TLS entry for a symbol.
3092 The same as GD, we allocate two consecutive GOT slots
3093 for module index and module offset, the only difference
3094 with GD is the module offset should be initialized to
3095 zero without any outstanding runtime relocation. */
3096 {"tlsldm", 0,
3097 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
3098 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
3099 0,
3100 0,
3101 0,
3102 0},
3103
3104 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3105 {"tlsldm_lo12_nc", 0,
3106 0, /* adr_type */
3107 0,
3108 0,
3109 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
3110 0,
3111 0},
3112
3113 /* 12 bit offset into the module TLS base address. */
3114 {"dtprel_lo12", 0,
3115 0, /* adr_type */
3116 0,
3117 0,
3118 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
3119 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
3120 0},
3121
3122 /* Same as dtprel_lo12, no overflow check. */
3123 {"dtprel_lo12_nc", 0,
3124 0, /* adr_type */
3125 0,
3126 0,
3127 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
3128 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
3129 0},
3130
3131 /* bits[23:12] of offset to the module TLS base address. */
3132 {"dtprel_hi12", 0,
3133 0, /* adr_type */
3134 0,
3135 0,
3136 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
3137 0,
3138 0},
3139
3140 /* bits[15:0] of offset to the module TLS base address. */
3141 {"dtprel_g0", 0,
3142 0, /* adr_type */
3143 0,
3144 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
3145 0,
3146 0,
3147 0},
3148
3149 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
3150 {"dtprel_g0_nc", 0,
3151 0, /* adr_type */
3152 0,
3153 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
3154 0,
3155 0,
3156 0},
3157
3158 /* bits[31:16] of offset to the module TLS base address. */
3159 {"dtprel_g1", 0,
3160 0, /* adr_type */
3161 0,
3162 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
3163 0,
3164 0,
3165 0},
3166
3167 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
3168 {"dtprel_g1_nc", 0,
3169 0, /* adr_type */
3170 0,
3171 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
3172 0,
3173 0,
3174 0},
3175
3176 /* bits[47:32] of offset to the module TLS base address. */
3177 {"dtprel_g2", 0,
3178 0, /* adr_type */
3179 0,
3180 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
3181 0,
3182 0,
3183 0},
3184
3185 /* Lower 16 bit offset into GOT entry for a symbol */
3186 {"tlsdesc_off_g0_nc", 0,
3187 0, /* adr_type */
3188 0,
3189 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
3190 0,
3191 0,
3192 0},
3193
3194 /* Higher 16 bit offset into GOT entry for a symbol */
3195 {"tlsdesc_off_g1", 0,
3196 0, /* adr_type */
3197 0,
3198 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
3199 0,
3200 0,
3201 0},
3202
3203 /* Get to the page containing GOT TLS entry for a symbol */
3204 {"gottprel", 0,
3205 0, /* adr_type */
3206 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
3207 0,
3208 0,
3209 0,
3210 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
3211
3212 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3213 {"gottprel_lo12", 0,
3214 0, /* adr_type */
3215 0,
3216 0,
3217 0,
3218 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
3219 0},
3220
3221 /* Get tp offset for a symbol. */
3222 {"tprel", 0,
3223 0, /* adr_type */
3224 0,
3225 0,
3226 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3227 0,
3228 0},
3229
3230 /* Get tp offset for a symbol. */
3231 {"tprel_lo12", 0,
3232 0, /* adr_type */
3233 0,
3234 0,
3235 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3236 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
3237 0},
3238
3239 /* Get tp offset for a symbol. */
3240 {"tprel_hi12", 0,
3241 0, /* adr_type */
3242 0,
3243 0,
3244 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
3245 0,
3246 0},
3247
3248 /* Get tp offset for a symbol. */
3249 {"tprel_lo12_nc", 0,
3250 0, /* adr_type */
3251 0,
3252 0,
3253 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
3254 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
3255 0},
3256
3257 /* Most significant bits 32-47 of address/value: MOVZ. */
3258 {"tprel_g2", 0,
3259 0, /* adr_type */
3260 0,
3261 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
3262 0,
3263 0,
3264 0},
3265
3266 /* Most significant bits 16-31 of address/value: MOVZ. */
3267 {"tprel_g1", 0,
3268 0, /* adr_type */
3269 0,
3270 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3271 0,
3272 0,
3273 0},
3274
3275 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
3276 {"tprel_g1_nc", 0,
3277 0, /* adr_type */
3278 0,
3279 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3280 0,
3281 0,
3282 0},
3283
3284 /* Most significant bits 0-15 of address/value: MOVZ. */
3285 {"tprel_g0", 0,
3286 0, /* adr_type */
3287 0,
3288 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3289 0,
3290 0,
3291 0},
3292
3293 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3294 {"tprel_g0_nc", 0,
3295 0, /* adr_type */
3296 0,
3297 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3298 0,
3299 0,
3300 0},
3301
3302 /* 15bit offset from got entry to base address of GOT table. */
3303 {"gotpage_lo15", 0,
3304 0,
3305 0,
3306 0,
3307 0,
3308 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3309 0},
3310
3311 /* 14bit offset from got entry to base address of GOT table. */
3312 {"gotpage_lo14", 0,
3313 0,
3314 0,
3315 0,
3316 0,
3317 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3318 0},
3319 };
3320
3321 /* Given the address of a pointer pointing to the textual name of a
3322 relocation as may appear in assembler source, attempt to find its
3323 details in reloc_table. The pointer will be updated to the character
3324 after the trailing colon. On failure, NULL will be returned;
3325 otherwise return the reloc_table_entry. */
3326
3327 static struct reloc_table_entry *
3328 find_reloc_table_entry (char **str)
3329 {
3330 unsigned int i;
3331 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3332 {
3333 int length = strlen (reloc_table[i].name);
3334
3335 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3336 && (*str)[length] == ':')
3337 {
3338 *str += (length + 1);
3339 return &reloc_table[i];
3340 }
3341 }
3342
3343 return NULL;
3344 }
3345
/* Decide whether the relocation TYPE (a BFD_RELOC_AARCH64_* value) must
   be emitted for the linker to resolve.

   Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK (the caller then falls back to the generic policy).  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    /* GOT-, TLS- and PC-relative relocations whose final value depends
       on link-time layout; none of them can be resolved at assembly
       time even for local symbols.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      return -1;
    }
}
3448
3449 int
3450 aarch64_force_relocation (struct fix *fixp)
3451 {
3452 int res = aarch64_force_reloc (fixp->fx_r_type);
3453
3454 if (res == -1)
3455 return generic_force_reloc (fixp);
3456 return res;
3457 }
3458
/* Mode argument to parse_shift and parse_shifter_operand; selects which
   shift/extend operators are acceptable in the current context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3473
3474 /* Parse a <shift> operator on an AArch64 data processing instruction.
3475 Return TRUE on success; otherwise return FALSE. */
3476 static bool
3477 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3478 {
3479 const struct aarch64_name_value_pair *shift_op;
3480 enum aarch64_modifier_kind kind;
3481 expressionS exp;
3482 int exp_has_prefix;
3483 char *s = *str;
3484 char *p = s;
3485
3486 for (p = *str; ISALPHA (*p); p++)
3487 ;
3488
3489 if (p == *str)
3490 {
3491 set_syntax_error (_("shift expression expected"));
3492 return false;
3493 }
3494
3495 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3496
3497 if (shift_op == NULL)
3498 {
3499 set_syntax_error (_("shift operator expected"));
3500 return false;
3501 }
3502
3503 kind = aarch64_get_operand_modifier (shift_op);
3504
3505 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3506 {
3507 set_syntax_error (_("invalid use of 'MSL'"));
3508 return false;
3509 }
3510
3511 if (kind == AARCH64_MOD_MUL
3512 && mode != SHIFTED_MUL
3513 && mode != SHIFTED_MUL_VL)
3514 {
3515 set_syntax_error (_("invalid use of 'MUL'"));
3516 return false;
3517 }
3518
3519 switch (mode)
3520 {
3521 case SHIFTED_LOGIC_IMM:
3522 if (aarch64_extend_operator_p (kind))
3523 {
3524 set_syntax_error (_("extending shift is not permitted"));
3525 return false;
3526 }
3527 break;
3528
3529 case SHIFTED_ARITH_IMM:
3530 if (kind == AARCH64_MOD_ROR)
3531 {
3532 set_syntax_error (_("'ROR' shift is not permitted"));
3533 return false;
3534 }
3535 break;
3536
3537 case SHIFTED_LSL:
3538 if (kind != AARCH64_MOD_LSL)
3539 {
3540 set_syntax_error (_("only 'LSL' shift is permitted"));
3541 return false;
3542 }
3543 break;
3544
3545 case SHIFTED_MUL:
3546 if (kind != AARCH64_MOD_MUL)
3547 {
3548 set_syntax_error (_("only 'MUL' is permitted"));
3549 return false;
3550 }
3551 break;
3552
3553 case SHIFTED_MUL_VL:
3554 /* "MUL VL" consists of two separate tokens. Require the first
3555 token to be "MUL" and look for a following "VL". */
3556 if (kind == AARCH64_MOD_MUL)
3557 {
3558 skip_whitespace (p);
3559 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3560 {
3561 p += 2;
3562 kind = AARCH64_MOD_MUL_VL;
3563 break;
3564 }
3565 }
3566 set_syntax_error (_("only 'MUL VL' is permitted"));
3567 return false;
3568
3569 case SHIFTED_REG_OFFSET:
3570 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3571 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3572 {
3573 set_fatal_syntax_error
3574 (_("invalid shift for the register offset addressing mode"));
3575 return false;
3576 }
3577 break;
3578
3579 case SHIFTED_LSL_MSL:
3580 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3581 {
3582 set_syntax_error (_("invalid shift operator"));
3583 return false;
3584 }
3585 break;
3586
3587 default:
3588 abort ();
3589 }
3590
3591 /* Whitespace can appear here if the next thing is a bare digit. */
3592 skip_whitespace (p);
3593
3594 /* Parse shift amount. */
3595 exp_has_prefix = 0;
3596 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3597 exp.X_op = O_absent;
3598 else
3599 {
3600 if (is_immediate_prefix (*p))
3601 {
3602 p++;
3603 exp_has_prefix = 1;
3604 }
3605 aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
3606 }
3607 if (kind == AARCH64_MOD_MUL_VL)
3608 /* For consistency, give MUL VL the same shift amount as an implicit
3609 MUL #1. */
3610 operand->shifter.amount = 1;
3611 else if (exp.X_op == O_absent)
3612 {
3613 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3614 {
3615 set_syntax_error (_("missing shift amount"));
3616 return false;
3617 }
3618 operand->shifter.amount = 0;
3619 }
3620 else if (exp.X_op != O_constant)
3621 {
3622 set_syntax_error (_("constant shift amount required"));
3623 return false;
3624 }
3625 /* For parsing purposes, MUL #n has no inherent range. The range
3626 depends on the operand and will be checked by operand-specific
3627 routines. */
3628 else if (kind != AARCH64_MOD_MUL
3629 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3630 {
3631 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3632 return false;
3633 }
3634 else
3635 {
3636 operand->shifter.amount = exp.X_add_number;
3637 operand->shifter.amount_present = 1;
3638 }
3639
3640 operand->shifter.operator_present = 1;
3641 operand->shifter.kind = kind;
3642
3643 *str = p;
3644 return true;
3645 }
3646
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   The parsed expression is stored in inst.reloc.exp and any LSL shift
   in *OPERAND.  Validation of immediate operands is deferred to
   md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the two immediate-capable modes make sense here.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Do not accept any shifter for logical immediate values.
     NOTE(review): if parse_shift fails here, we still fall through and
     return true with its syntax error recorded and p advanced past the
     comma — presumably the caller inspects the error state; confirm
     before relying on the return value alone.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3688
3689 /* Parse a <shifter_operand> for a data processing instruction:
3690
3691 <Rm>
3692 <Rm>, <shift>
3693 #<immediate>
3694 #<immediate>, LSL #imm
3695
3696 where <shift> is handled by parse_shift above, and the last two
3697 cases are handled by the function above.
3698
3699 Validation of immediate operands is deferred to md_apply_fix.
3700
3701 Return TRUE on success; otherwise return FALSE. */
3702
3703 static bool
3704 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3705 enum parse_shift_mode mode)
3706 {
3707 const reg_entry *reg;
3708 aarch64_opnd_qualifier_t qualifier;
3709 enum aarch64_operand_class opd_class
3710 = aarch64_get_operand_class (operand->type);
3711
3712 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3713 if (reg)
3714 {
3715 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3716 {
3717 set_syntax_error (_("unexpected register in the immediate operand"));
3718 return false;
3719 }
3720
3721 if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR))
3722 {
3723 set_expected_reg_error (REG_TYPE_R_ZR, reg, 0);
3724 return false;
3725 }
3726
3727 operand->reg.regno = reg->number;
3728 operand->qualifier = qualifier;
3729
3730 /* Accept optional shift operation on register. */
3731 if (! skip_past_comma (str))
3732 return true;
3733
3734 if (! parse_shift (str, operand, mode))
3735 return false;
3736
3737 return true;
3738 }
3739 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3740 {
3741 set_syntax_error
3742 (_("integer register expected in the extended/shifted operand "
3743 "register"));
3744 return false;
3745 }
3746
3747 /* We have a shifted immediate variable. */
3748 return parse_shifter_operand_imm (str, operand, mode);
3749 }
3750
/* Parse a shifter operand that may be prefixed by a relocation
   modifier, e.g. "#:lo12:sym" or ":lo12:sym".  Records the ADD-variant
   relocation type and the parsed expression in inst.reloc, then hands
   any trailing shifter back to parse_shifter_operand_imm.  Plain
   operands are delegated to parse_shifter_operand.

   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :relocation-name:
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip "#:" or ":" to reach the modifier name.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* Only modifiers with an ADD-instruction variant are legal here.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3812
3813 /* Parse all forms of an address expression. Information is written
3814 to *OPERAND and/or inst.reloc.
3815
3816 The A64 instruction set has the following addressing modes:
3817
3818 Offset
3819 [base] // in SIMD ld/st structure
3820 [base{,#0}] // in ld/st exclusive
3821 [base{,#imm}]
3822 [base,Xm{,LSL #imm}]
3823 [base,Xm,SXTX {#imm}]
3824 [base,Wm,(S|U)XTW {#imm}]
3825 Pre-indexed
3826 [base]! // in ldraa/ldrab exclusive
3827 [base,#imm]!
3828 Post-indexed
3829 [base],#imm
3830 [base],Xm // in SIMD ld/st structure
3831 PC-relative (literal)
3832 label
3833 SVE:
3834 [base,#imm,MUL VL]
3835 [base,Zm.D{,LSL #imm}]
3836 [base,Zm.S,(S|U)XTW {#imm}]
3837 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3838 [Zn.S,#imm]
3839 [Zn.D,#imm]
3840 [Zn.S{, Xm}]
3841 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3842 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3843 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3844
3845 (As a convenience, the notation "=immediate" is permitted in conjunction
3846 with the pc-relative literal load instructions to automatically place an
3847 immediate value or symbolic address in a nearby literal pool and generate
3848 a hidden label which references it.)
3849
3850 Upon a successful parsing, the address structure in *OPERAND will be
3851 filled in the following way:
3852
3853 .base_regno = <base>
3854 .offset.is_reg // 1 if the offset is a register
3855 .offset.imm = <imm>
3856 .offset.regno = <Rm>
3857
3858 For different addressing modes defined in the A64 ISA:
3859
3860 Offset
3861 .pcrel=0; .preind=1; .postind=0; .writeback=0
3862 Pre-indexed
3863 .pcrel=0; .preind=1; .postind=0; .writeback=1
3864 Post-indexed
3865 .pcrel=0; .preind=0; .postind=1; .writeback=1
3866 PC-relative (literal)
3867 .pcrel=1; .preind=1; .postind=0; .writeback=0
3868
3869 The shift/extension information, if any, will be stored in .shifter.
3870 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3871 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3872 corresponding register.
3873
3874 BASE_TYPE says which types of base register should be accepted and
3875 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3876 is the type of shifter that is allowed for immediate offsets,
3877 or SHIFTED_NONE if none.
3878
3879 In all other respects, it is the caller's responsibility to check
3880 for addressing modes not supported by the instruction, and to set
3881 inst.reloc.type. */
3882
/* Worker for all address parsing; see the large comment above for the
   accepted syntaxes and the meaning of each parameter.  Parsing is
   strictly left-to-right over *STR; results go to *OPERAND and, for
   relocations and literal immediates, to inst.reloc.  */
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is the PC-relative form, i.e. =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* Pick the reloc variant that matches the instruction form.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Remember whether the base looked like a register name at all, so we
     can choose the most helpful diagnostic below.  */
  bool alpha_base_p = ISALPHA (*p);
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      if (reg
	  && aarch64_check_reg_type (reg, REG_TYPE_R_SP)
	  && *base_qualifier == AARCH64_OPND_QLF_W)
	set_syntax_error (_("expected a 64-bit base register"));
      else if (alpha_base_p)
	set_syntax_error (_("invalid base register"));
      else
	set_syntax_error (_("expected a base register"));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These kinds require a 64-bit offset register...  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* ...and (except for the SVE2 vector-plus-scalar case) an
		 offset the same size as the base.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW requires a 32-bit offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      /* Plain immediate offset.  */
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      /* Pre-indexed writeback; a register offset cannot be written back.  */
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!  */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!  */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is shorthand for [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
4189
4190 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4191 on success. */
4192 static bool
4193 parse_address (char **str, aarch64_opnd_info *operand)
4194 {
4195 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4196 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4197 REG_TYPE_R64_SP, REG_TYPE_R_ZR, SHIFTED_NONE);
4198 }
4199
4200 /* Parse an address in which SVE vector registers and MUL VL are allowed.
4201 The arguments have the same meaning as for parse_address_main.
4202 Return TRUE on success. */
4203 static bool
4204 parse_sve_address (char **str, aarch64_opnd_info *operand,
4205 aarch64_opnd_qualifier_t *base_qualifier,
4206 aarch64_opnd_qualifier_t *offset_qualifier)
4207 {
4208 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
4209 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
4210 SHIFTED_MUL_VL);
4211 }
4212
4213 /* Parse a register X0-X30. The register must be 64-bit and register 31
4214 is unallocated. */
4215 static bool
4216 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4217 {
4218 const reg_entry *reg = parse_reg (str);
4219 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4220 {
4221 set_expected_reg_error (REG_TYPE_R_64, reg, 0);
4222 return false;
4223 }
4224 operand->reg.regno = reg->number;
4225 operand->qualifier = AARCH64_OPND_QLF_X;
4226 return true;
4227 }
4228
/* Parse an operand for a MOVZ, MOVN or MOVK instruction: an optional
   ':reloc:' modifier followed by an expression, stored in inst.reloc.
   *INTERNAL_FIXUP_P is set to 1 when no modifier is present, telling
   the caller to resolve the value itself rather than emit a reloc.
   Return TRUE on success; otherwise return FALSE.  */
static bool
parse_half (char **str, int *internal_fixup_p)
{
  char *p = *str;

  /* The immediate prefix is optional here.  */
  skip_past_char (&p, '#');

  gas_assert (internal_fixup_p);
  *internal_fixup_p = 0;

  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;

      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* Only modifiers with a MOVW variant apply to these insns.  */
      if (entry->movw_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      inst.reloc.type = entry->movw_type;
    }
  else
    *internal_fixup_p = 1;

  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
    return false;

  *str = p;
  return true;
}
4272
/* Parse the <label> operand of an ADRP instruction:
      ADRP <Xd>, <label>
   An optional ':reloc:' modifier selects the relocation type; without
   one, BFD_RELOC_AARCH64_ADR_HI21_PCREL is used.  The expression and
   relocation are recorded in inst.reloc.
   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_adrp (char **str)
{
  char *p;

  p = *str;
  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* Only modifiers with an ADRP variant are legal here.  */
      if (entry->adrp_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      inst.reloc.type = entry->adrp_type;
    }
  else
    inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;

  /* ADRP is always PC-relative.  */
  inst.reloc.pc_rel = 1;
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
    return false;
  *str = p;
  return true;
}
4313
4314 /* Miscellaneous. */
4315
4316 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4317 of SIZE tokens in which index I gives the token for field value I,
4318 or is null if field value I is invalid. If the symbolic operand
4319 can also be given as a 0-based integer, REG_TYPE says which register
4320 names should be treated as registers rather than as symbolic immediates
4321 while parsing that integer. REG_TYPE is REG_TYPE_MAX otherwise.
4322
4323 Return true on success, moving *STR past the operand and storing the
4324 field value in *VAL. */
4325
4326 static int
4327 parse_enum_string (char **str, int64_t *val, const char *const *array,
4328 size_t size, aarch64_reg_type reg_type)
4329 {
4330 expressionS exp;
4331 char *p, *q;
4332 size_t i;
4333
4334 /* Match C-like tokens. */
4335 p = q = *str;
4336 while (ISALNUM (*q))
4337 q++;
4338
4339 for (i = 0; i < size; ++i)
4340 if (array[i]
4341 && strncasecmp (array[i], p, q - p) == 0
4342 && array[i][q - p] == 0)
4343 {
4344 *val = i;
4345 *str = q;
4346 return true;
4347 }
4348
4349 if (reg_type == REG_TYPE_MAX)
4350 return false;
4351
4352 if (!parse_immediate_expression (&p, &exp, reg_type))
4353 return false;
4354
4355 if (exp.X_op == O_constant
4356 && (uint64_t) exp.X_add_number < size)
4357 {
4358 *val = exp.X_add_number;
4359 *str = p;
4360 return true;
4361 }
4362
4363 /* Use the default error for this operand. */
4364 return false;
4365 }
4366
4367 /* Parse an option for a preload instruction. Returns the encoding for the
4368 option, or PARSE_FAIL. */
4369
4370 static int
4371 parse_pldop (char **str)
4372 {
4373 char *p, *q;
4374 const struct aarch64_name_value_pair *o;
4375
4376 p = q = *str;
4377 while (ISALNUM (*q))
4378 q++;
4379
4380 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4381 if (!o)
4382 return PARSE_FAIL;
4383
4384 *str = q;
4385 return o->value;
4386 }
4387
4388 /* Parse an option for a barrier instruction. Returns the encoding for the
4389 option, or PARSE_FAIL. */
4390
4391 static int
4392 parse_barrier (char **str)
4393 {
4394 char *p, *q;
4395 const struct aarch64_name_value_pair *o;
4396
4397 p = q = *str;
4398 while (ISALPHA (*q))
4399 q++;
4400
4401 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4402 if (!o)
4403 return PARSE_FAIL;
4404
4405 *str = q;
4406 return o->value;
4407 }
4408
4409 /* Parse an option for barrier, bti and guarded control stack data
4410 synchronization instructions. Return true on matching the target
4411 options else return false. */
4412
4413 static bool
4414 parse_hint_opt (const char *name, char **str,
4415 const struct aarch64_name_value_pair ** hint_opt)
4416 {
4417 char *p, *q;
4418 const struct aarch64_name_value_pair *o;
4419
4420 p = q = *str;
4421 while (ISALPHA (*q))
4422 q++;
4423
4424 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4425 if (!o)
4426 return false;
4427
4428 if ((strcmp ("gcsb", name) == 0 && o->value != HINT_OPD_DSYNC)
4429 || ((strcmp ("psb", name) == 0 || strcmp ("tsb", name) == 0)
4430 && o->value != HINT_OPD_CSYNC)
4431 || ((strcmp ("bti", name) == 0)
4432 && (o->value != HINT_OPD_C && o->value != HINT_OPD_J
4433 && o->value != HINT_OPD_JC)))
4434 return false;
4435
4436 *str = q;
4437 *hint_opt = o;
4438 return true;
4439 }
4440
4441 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4442 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4443 on failure. Format:
4444
4445 REG_TYPE.QUALIFIER
4446
4447 Side effect: Update STR with current parse position of success.
4448
4449 FLAGS is as for parse_typed_reg. */
4450
4451 static const reg_entry *
4452 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4453 aarch64_opnd_qualifier_t *qualifier, unsigned int flags)
4454 {
4455 struct vector_type_el vectype;
4456 const reg_entry *reg = parse_typed_reg (str, reg_type, &vectype,
4457 PTR_FULL_REG | flags);
4458 if (!reg)
4459 return NULL;
4460
4461 if (vectype.type == NT_invtype)
4462 *qualifier = AARCH64_OPND_QLF_NIL;
4463 else
4464 {
4465 *qualifier = vectype_to_qualifier (&vectype);
4466 if (*qualifier == AARCH64_OPND_QLF_NIL)
4467 return NULL;
4468 }
4469
4470 return reg;
4471 }
4472
4473 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4474
4475 #<imm>
4476 <imm>
4477
4478 Function return TRUE if immediate was found, or FALSE.
4479 */
4480 static bool
4481 parse_sme_immediate (char **str, int64_t *imm)
4482 {
4483 int64_t val;
4484 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4485 return false;
4486
4487 *imm = val;
4488 return true;
4489 }
4490
/* Parse index with selection register and immediate offset:

    [<Wv>, <imm>]
    [<Wv>, #<imm>]

   Return true on success, populating OPND with the parsed index.  */

static bool
parse_sme_za_index (char **str, struct aarch64_indexed_za *opnd)
{
  const reg_entry *reg;

  if (!skip_past_char (str, '['))
    {
      set_syntax_error (_("expected '['"));
      return false;
    }

  /* The selection register, encoded in the 2-bit Rv field.  */
  reg = parse_reg (str);
  if (reg == NULL || reg->type != REG_TYPE_R_32)
    {
      set_syntax_error (_("expected a 32-bit selection register"));
      return false;
    }
  opnd->index.regno = reg->number;

  if (!skip_past_char (str, ','))
    {
      set_syntax_error (_("missing immediate offset"));
      return false;
    }

  if (!parse_sme_immediate (str, &opnd->index.imm))
    {
      set_syntax_error (_("expected a constant immediate offset"));
      return false;
    }

  /* Optional offset range "<imm>:<imm>"; the second immediate must be
     strictly greater than the first, and only the count-minus-one is
     recorded.  */
  if (skip_past_char (str, ':'))
    {
      int64_t end;
      if (!parse_sme_immediate (str, &end))
	{
	  set_syntax_error (_("expected a constant immediate offset"));
	  return false;
	}
      if (end < opnd->index.imm)
	{
	  set_syntax_error (_("the last offset is less than the"
			      " first offset"));
	  return false;
	}
      if (end == opnd->index.imm)
	{
	  set_syntax_error (_("the last offset is equal to the"
			      " first offset"));
	  return false;
	}
      opnd->index.countm1 = (uint64_t) end - opnd->index.imm;
    }

  /* Optional trailing vector group specifier, "vgx2" or "vgx4";
     defaults to 0 (no group) when absent.  */
  opnd->group_size = 0;
  if (skip_past_char (str, ','))
    {
      if (strncasecmp (*str, "vgx2", 4) == 0 && !ISALPHA ((*str)[4]))
	{
	  *str += 4;
	  opnd->group_size = 2;
	}
      else if (strncasecmp (*str, "vgx4", 4) == 0 && !ISALPHA ((*str)[4]))
	{
	  *str += 4;
	  opnd->group_size = 4;
	}
      else
	{
	  set_syntax_error (_("invalid vector group size"));
	  return false;
	}
    }

  if (!skip_past_char (str, ']'))
    {
      set_syntax_error (_("expected ']'"));
      return false;
    }

  return true;
}
4581
/* Parse a register of type REG_TYPE that might have an element type
   qualifier and that is indexed by two values: a 32-bit register,
   followed by an immediate.  The ranges of the register and the
   immediate vary by opcode and are checked in libopcodes.

   Return true on success, populating OPND with information about
   the operand and setting QUALIFIER to the register qualifier.

   Field format examples:

     <Pm>.<T>[<Wv>, #<imm>]
     ZA[<Wv>, #<imm>]
     <ZAn><HV>.<T>[<Wv>, #<imm>]

   FLAGS is as for parse_typed_reg.  */

static bool
parse_dual_indexed_reg (char **str, aarch64_reg_type reg_type,
			struct aarch64_indexed_za *opnd,
			aarch64_opnd_qualifier_t *qualifier,
			unsigned int flags)
{
  const reg_entry *reg = parse_reg_with_qual (str, reg_type, qualifier, flags);
  if (!reg)
    return false;

  /* Record whether this is a ZA tile vector (horizontal/vertical slice)
     before parsing the "[<Wv>, #<imm>]" index that follows.  */
  opnd->v = aarch64_check_reg_type (reg, REG_TYPE_ZATV);
  opnd->regno = reg->number;

  return parse_sme_za_index (str, opnd);
}
4613
4614 /* Like parse_sme_za_hv_tiles_operand, but expect braces around the
4615 operand. */
4616
4617 static bool
4618 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4619 struct aarch64_indexed_za *opnd,
4620 aarch64_opnd_qualifier_t *qualifier)
4621 {
4622 if (!skip_past_char (str, '{'))
4623 {
4624 set_expected_reglist_error (REG_TYPE_ZATHV, parse_reg (str));
4625 return false;
4626 }
4627
4628 if (!parse_dual_indexed_reg (str, REG_TYPE_ZATHV, opnd, qualifier,
4629 PTR_IN_REGLIST))
4630 return false;
4631
4632 if (!skip_past_char (str, '}'))
4633 {
4634 set_syntax_error (_("expected '}'"));
4635 return false;
4636 }
4637
4638 return true;
4639 }
4640
4641 /* Parse list of up to eight 64-bit element tile names separated by commas in
4642 SME's ZERO instruction:
4643
4644 ZERO { <mask> }
4645
4646 Function returns <mask>:
4647
4648 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4649 */
4650 static int
4651 parse_sme_zero_mask(char **str)
4652 {
4653 char *q;
4654 int mask;
4655 aarch64_opnd_qualifier_t qualifier;
4656 unsigned int ptr_flags = PTR_IN_REGLIST;
4657
4658 mask = 0x00;
4659 q = *str;
4660 do
4661 {
4662 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA_ZAT,
4663 &qualifier, ptr_flags);
4664 if (!reg)
4665 return PARSE_FAIL;
4666
4667 if (reg->type == REG_TYPE_ZA)
4668 {
4669 if (qualifier != AARCH64_OPND_QLF_NIL)
4670 {
4671 set_syntax_error ("ZA should not have a size suffix");
4672 return PARSE_FAIL;
4673 }
4674 /* { ZA } is assembled as all-ones immediate. */
4675 mask = 0xff;
4676 }
4677 else
4678 {
4679 int regno = reg->number;
4680 if (qualifier == AARCH64_OPND_QLF_S_B)
4681 {
4682 /* { ZA0.B } is assembled as all-ones immediate. */
4683 mask = 0xff;
4684 }
4685 else if (qualifier == AARCH64_OPND_QLF_S_H)
4686 mask |= 0x55 << regno;
4687 else if (qualifier == AARCH64_OPND_QLF_S_S)
4688 mask |= 0x11 << regno;
4689 else if (qualifier == AARCH64_OPND_QLF_S_D)
4690 mask |= 0x01 << regno;
4691 else if (qualifier == AARCH64_OPND_QLF_S_Q)
4692 {
4693 set_syntax_error (_("ZA tile masks do not operate at .Q"
4694 " granularity"));
4695 return PARSE_FAIL;
4696 }
4697 else if (qualifier == AARCH64_OPND_QLF_NIL)
4698 {
4699 set_syntax_error (_("missing ZA tile size"));
4700 return PARSE_FAIL;
4701 }
4702 else
4703 {
4704 set_syntax_error (_("invalid ZA tile"));
4705 return PARSE_FAIL;
4706 }
4707 }
4708 ptr_flags |= PTR_GOOD_MATCH;
4709 }
4710 while (skip_past_char (&q, ','));
4711
4712 *str = q;
4713 return mask;
4714 }
4715
4716 /* Wraps in curly braces <mask> operand ZERO instruction:
4717
4718 ZERO { <mask> }
4719
4720 Function returns value of <mask> bit-field.
4721 */
4722 static int
4723 parse_sme_list_of_64bit_tiles (char **str)
4724 {
4725 int regno;
4726
4727 if (!skip_past_char (str, '{'))
4728 {
4729 set_syntax_error (_("expected '{'"));
4730 return PARSE_FAIL;
4731 }
4732
4733 /* Empty <mask> list is an all-zeros immediate. */
4734 if (!skip_past_char (str, '}'))
4735 {
4736 regno = parse_sme_zero_mask (str);
4737 if (regno == PARSE_FAIL)
4738 return PARSE_FAIL;
4739
4740 if (!skip_past_char (str, '}'))
4741 {
4742 set_syntax_error (_("expected '}'"));
4743 return PARSE_FAIL;
4744 }
4745 }
4746 else
4747 regno = 0x00;
4748
4749 return regno;
4750 }
4751
/* Parse streaming mode operand for SMSTART and SMSTOP.

     {SM | ZA}

   Function returns 's' if SM or 'z' if ZA is parsed.  Otherwise PARSE_FAIL.
   */
static int
parse_sme_sm_za (char **str)
{
  char *p, *q;

  p = q = *str;
  while (ISALPHA (*q))
    q++;

  /* Accept exactly the two-letter names "sm" and "za", case-insensitively.  */
  if ((q - p != 2)
      || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
    {
      set_syntax_error (_("expected SM or ZA operand"));
      return PARSE_FAIL;
    }

  *str = q;
  /* The lower-cased first letter distinguishes the two operands.  */
  return TOLOWER (p[0]);
}
4777
4778 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4779 Returns the encoding for the option, or PARSE_FAIL.
4780
4781 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4782 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4783
4784 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4785 field, otherwise as a system register.
4786 */
4787
4788 static int
4789 parse_sys_reg (char **str, htab_t sys_regs,
4790 int imple_defined_p, int pstatefield_p,
4791 uint32_t* flags, bool sysreg128_p)
4792 {
4793 char *p, *q;
4794 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4795 const aarch64_sys_reg *o;
4796 int value;
4797
4798 p = buf;
4799 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4800 if (p < buf + (sizeof (buf) - 1))
4801 *p++ = TOLOWER (*q);
4802 *p = '\0';
4803
4804 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4805 valid system register. This is enforced by construction of the hash
4806 table. */
4807 if (p - buf != q - *str)
4808 return PARSE_FAIL;
4809
4810 o = str_hash_find (sys_regs, buf);
4811 if (!o)
4812 {
4813 if (!imple_defined_p)
4814 return PARSE_FAIL;
4815 else
4816 {
4817 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
4818 unsigned int op0, op1, cn, cm, op2;
4819
4820 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
4821 != 5)
4822 return PARSE_FAIL;
4823 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
4824 return PARSE_FAIL;
4825 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
4826 if (flags)
4827 *flags = 0;
4828 }
4829 }
4830 else
4831 {
4832 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
4833 as_bad (_("selected processor does not support PSTATE field "
4834 "name '%s'"), buf);
4835 if (!pstatefield_p
4836 && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
4837 o->value, o->flags,
4838 &o->features))
4839 as_bad (_("selected processor does not support system register "
4840 "name '%s'"), buf);
4841 if (sysreg128_p && !aarch64_sys_reg_128bit_p (o->flags))
4842 as_bad (_("128-bit-wide accsess not allowed on selected system"
4843 " register '%s'"), buf);
4844 if (aarch64_sys_reg_deprecated_p (o->flags))
4845 as_warn (_("system register name '%s' is deprecated and may be "
4846 "removed in a future release"), buf);
4847 value = o->value;
4848 if (flags)
4849 *flags = o->flags;
4850 }
4851
4852 *str = q;
4853 return value;
4854 }
4855
4856 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4857 for the option, or NULL. */
4858
4859 static const aarch64_sys_ins_reg *
4860 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4861 {
4862 char *p, *q;
4863 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4864 const aarch64_sys_ins_reg *o;
4865
4866 p = buf;
4867 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4868 if (p < buf + (sizeof (buf) - 1))
4869 *p++ = TOLOWER (*q);
4870 *p = '\0';
4871
4872 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4873 valid system register. This is enforced by construction of the hash
4874 table. */
4875 if (p - buf != q - *str)
4876 return NULL;
4877
4878 o = str_hash_find (sys_ins_regs, buf);
4879 if (!o)
4880 return NULL;
4881
4882 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4883 o->name, o->value, o->flags, 0))
4884 as_bad (_("selected processor does not support system register "
4885 "name '%s'"), buf);
4886 if (aarch64_sys_reg_deprecated_p (o->flags))
4887 as_warn (_("system register name '%s' is deprecated and may be "
4888 "removed in a future release"), buf);
4889
4890 *str = q;
4891 return o;
4892 }
4893 \f
/* Parsing helpers for operand parsing in md_assemble and friends.  All of
   these assume a local `char *str` cursor and a `failure` label in the
   enclosing function, and jump to `failure` on a parse error.  */

/* Consume the single character CHR or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of REGTYPE into the local `reg` or fail.  */
#define po_reg_or_fail(regtype) do {				\
    reg = aarch64_reg_parse (&str, regtype, NULL);		\
    if (!reg)							\
      goto failure;						\
  } while (0)

/* Parse an integer or FP register of REG_TYPE, recording its number and
   inherent qualifier into the local `info`, or fail.  */
#define po_int_fp_reg_or_fail(reg_type) do {			\
    reg = parse_reg (&str);					\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_expected_reg_error (reg_type, reg, 0);		\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = inherent_reg_qualifier (reg);		\
  } while (0)

/* Parse a constant immediate into `val` with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into `val`, failing if it lies outside
   [MIN, MAX].  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a named enumeration value from ARRAY into `val`, or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Like po_enum_or_fail, but without falling back to parsing an
   immediate expression (REG_TYPE_MAX disables that path).  */
#define po_strict_enum_or_fail(array) do {			\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), REG_TYPE_MAX))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; fail if it is false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4948 \f
/* A primitive log calculator: return the floor of the base-2 logarithm
   of N (0 when N <= 1).  */

static inline unsigned int
get_log2 (unsigned int n)
{
  unsigned int log = 0;
  for (; n > 1; n >>= 1)
    log++;
  return log;
}
4962
4963 /* encode the 12-bit imm field of Add/sub immediate */
4964 static inline uint32_t
4965 encode_addsub_imm (uint32_t imm)
4966 {
4967 return imm << 10;
4968 }
4969
4970 /* encode the shift amount field of Add/sub immediate */
4971 static inline uint32_t
4972 encode_addsub_imm_shift_amount (uint32_t cnt)
4973 {
4974 return cnt << 22;
4975 }
4976
4977
4978 /* encode the imm field of Adr instruction */
4979 static inline uint32_t
4980 encode_adr_imm (uint32_t imm)
4981 {
4982 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
4983 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
4984 }
4985
4986 /* encode the immediate field of Move wide immediate */
4987 static inline uint32_t
4988 encode_movw_imm (uint32_t imm)
4989 {
4990 return imm << 5;
4991 }
4992
4993 /* encode the 26-bit offset of unconditional branch */
4994 static inline uint32_t
4995 encode_branch_ofs_26 (uint32_t ofs)
4996 {
4997 return ofs & ((1 << 26) - 1);
4998 }
4999
5000 /* encode the 19-bit offset of conditional branch and compare & branch */
5001 static inline uint32_t
5002 encode_cond_branch_ofs_19 (uint32_t ofs)
5003 {
5004 return (ofs & ((1 << 19) - 1)) << 5;
5005 }
5006
5007 /* encode the 19-bit offset of ld literal */
5008 static inline uint32_t
5009 encode_ld_lit_ofs_19 (uint32_t ofs)
5010 {
5011 return (ofs & ((1 << 19) - 1)) << 5;
5012 }
5013
5014 /* Encode the 14-bit offset of test & branch. */
5015 static inline uint32_t
5016 encode_tst_branch_ofs_14 (uint32_t ofs)
5017 {
5018 return (ofs & ((1 << 14) - 1)) << 5;
5019 }
5020
5021 /* Encode the 16-bit imm field of svc/hvc/smc. */
5022 static inline uint32_t
5023 encode_svc_imm (uint32_t imm)
5024 {
5025 return imm << 5;
5026 }
5027
5028 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
5029 static inline uint32_t
5030 reencode_addsub_switch_add_sub (uint32_t opcode)
5031 {
5032 return opcode ^ (1 << 30);
5033 }
5034
5035 static inline uint32_t
5036 reencode_movzn_to_movz (uint32_t opcode)
5037 {
5038 return opcode | (1 << 30);
5039 }
5040
5041 static inline uint32_t
5042 reencode_movzn_to_movn (uint32_t opcode)
5043 {
5044 return opcode & ~(1 << 30);
5045 }
5046
5047 /* Overall per-instruction processing. */
5048
5049 /* We need to be able to fix up arbitrary expressions in some statements.
5050 This is so that we can handle symbols that are an arbitrary distance from
5051 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
5052 which returns part of an address in a form which will be valid for
5053 a data instruction. We do this by pushing the expression into a symbol
5054 in the expr_section, and creating a fix for that. */
5055
5056 static fixS *
5057 fix_new_aarch64 (fragS * frag,
5058 int where,
5059 short int size,
5060 expressionS * exp,
5061 int pc_rel,
5062 int reloc)
5063 {
5064 fixS *new_fix;
5065
5066 switch (exp->X_op)
5067 {
5068 case O_constant:
5069 case O_symbol:
5070 case O_add:
5071 case O_subtract:
5072 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5073 break;
5074
5075 default:
5076 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5077 pc_rel, reloc);
5078 break;
5079 }
5080 return new_fix;
5081 }
5082 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;
5088
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Must be kept in sync
   with the order of enum aarch64_operand_error_kind, which it indexes.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_INVALID_VG_SIZE",
  "AARCH64_OPDE_REG_LIST_LENGTH",
  "AARCH64_OPDE_REG_LIST_STRIDE",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_OTHER_ERROR",
  "AARCH64_OPDE_INVALID_REGNO",
};
#endif /* DEBUG_AARCH64 */
5111
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enum's declaration order encoding
     severity.  These asserts document (and check) the ordering assumed by
     callers; they compile away to nothing when NDEBUG-style checking is
     off.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VG_SIZE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_REG_LIST_LENGTH > AARCH64_OPDE_INVALID_VG_SIZE);
  gas_assert (AARCH64_OPDE_REG_LIST_STRIDE > AARCH64_OPDE_REG_LIST_LENGTH);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_REG_LIST_STRIDE);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST_STRIDE);
  gas_assert (AARCH64_OPDE_INVALID_REGNO > AARCH64_OPDE_OTHER_ERROR);
  return lhs > rhs;
}
5138
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Copy up to 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5167
5168 static void
5169 reset_aarch64_instruction (aarch64_instruction *instruction)
5170 {
5171 memset (instruction, '\0', sizeof (aarch64_instruction));
5172 instruction->reloc.type = BFD_RELOC_UNUSED;
5173 }
5174
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;	/* Opcode template the error is against.  */
  aarch64_operand_error detail;	/* Kind, operand index and message.  */
  struct operand_error_record *next;	/* Singly-linked list link.  */
};

typedef struct operand_error_record operand_error_record;

/* Head/tail of the list of per-opcode error records for one line.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines to avoid
   re-allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5206
5207 /* Initialize the data structure that stores the operand mismatch
5208 information on assembling one line of the assembly code. */
5209 static void
5210 init_operand_error_report (void)
5211 {
5212 if (operand_error_report.head != NULL)
5213 {
5214 gas_assert (operand_error_report.tail != NULL);
5215 operand_error_report.tail->next = free_opnd_error_record_nodes;
5216 free_opnd_error_record_nodes = operand_error_report.head;
5217 operand_error_report.head = NULL;
5218 operand_error_report.tail = NULL;
5219 return;
5220 }
5221 gas_assert (operand_error_report.tail == NULL);
5222 }
5223
5224 /* Return TRUE if some operand error has been recorded during the
5225 parsing of the current assembly line using the opcode *OPCODE;
5226 otherwise return FALSE. */
5227 static inline bool
5228 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5229 {
5230 operand_error_record *record = operand_error_report.head;
5231 return record && record->opcode == opcode;
5232 }
5233
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  /* Otherwise RECORD is the existing head record for OPCODE; decide
     whether the new error should replace the stored one.  */
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Store (or overwrite) the error detail in the head record.  */
  record->detail = new_record->detail;
}
5285
5286 static inline void
5287 record_operand_error_info (const aarch64_opcode *opcode,
5288 aarch64_operand_error *error_info)
5289 {
5290 operand_error_record record;
5291 record.opcode = opcode;
5292 record.detail = *error_info;
5293 add_operand_error_record (&record);
5294 }
5295
5296 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5297 error message *ERROR, for operand IDX (count from 0). */
5298
5299 static void
5300 record_operand_error (const aarch64_opcode *opcode, int idx,
5301 enum aarch64_operand_error_kind kind,
5302 const char* error)
5303 {
5304 aarch64_operand_error info;
5305 memset(&info, 0, sizeof (info));
5306 info.index = idx;
5307 info.kind = kind;
5308 info.error = error;
5309 info.non_fatal = false;
5310 record_operand_error_info (opcode, &info);
5311 }
5312
5313 static void
5314 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5315 enum aarch64_operand_error_kind kind,
5316 const char* error, const int *extra_data)
5317 {
5318 aarch64_operand_error info;
5319 info.index = idx;
5320 info.kind = kind;
5321 info.error = error;
5322 info.data[0].i = extra_data[0];
5323 info.data[1].i = extra_data[1];
5324 info.data[2].i = extra_data[2];
5325 info.non_fatal = false;
5326 record_operand_error_info (opcode, &info);
5327 }
5328
5329 static void
5330 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5331 const char* error, int lower_bound,
5332 int upper_bound)
5333 {
5334 int data[3] = {lower_bound, upper_bound, 0};
5335 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5336 error, data);
5337 }
5338
5339 /* Remove the operand error record for *OPCODE. */
5340 static void ATTRIBUTE_UNUSED
5341 remove_operand_error_record (const aarch64_opcode *opcode)
5342 {
5343 if (opcode_has_operand_error_p (opcode))
5344 {
5345 operand_error_record* record = operand_error_report.head;
5346 gas_assert (record != NULL && operand_error_report.tail != NULL);
5347 operand_error_report.head = record->next;
5348 record->next = free_opnd_error_record_nodes;
5349 free_opnd_error_record_nodes = record;
5350 if (operand_error_report.head == NULL)
5351 {
5352 gas_assert (operand_error_report.tail == record);
5353 operand_error_report.tail = NULL;
5354 }
5355 }
5356 }
5357
5358 /* Given the instruction in *INSTR, return the index of the best matched
5359 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5360
5361 Return -1 if there is no qualifier sequence; return the first match
5362 if there is multiple matches found. */
5363
5364 static int
5365 find_best_match (const aarch64_inst *instr,
5366 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
5367 {
5368 int i, num_opnds, max_num_matched, idx;
5369
5370 num_opnds = aarch64_num_of_operands (instr->opcode);
5371 if (num_opnds == 0)
5372 {
5373 DEBUG_TRACE ("no operand");
5374 return -1;
5375 }
5376
5377 max_num_matched = 0;
5378 idx = 0;
5379
5380 /* For each pattern. */
5381 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5382 {
5383 int j, num_matched;
5384 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
5385
5386 /* Most opcodes has much fewer patterns in the list. */
5387 if (empty_qualifier_sequence_p (qualifiers))
5388 {
5389 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
5390 break;
5391 }
5392
5393 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
5394 if (*qualifiers == instr->operands[j].qualifier)
5395 ++num_matched;
5396
5397 if (num_matched > max_num_matched)
5398 {
5399 max_num_matched = num_matched;
5400 idx = i;
5401 }
5402 }
5403
5404 DEBUG_TRACE ("return with %d", idx);
5405 return idx;
5406 }
5407
5408 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5409 corresponding operands in *INSTR. */
5410
5411 static inline void
5412 assign_qualifier_sequence (aarch64_inst *instr,
5413 const aarch64_opnd_qualifier_t *qualifiers)
5414 {
5415 int i = 0;
5416 int num_opnds = aarch64_num_of_operands (instr->opcode);
5417 gas_assert (num_opnds);
5418 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5419 instr->operands[i].qualifier = *qualifiers;
5420 }
5421
5422 /* Callback used by aarch64_print_operand to apply STYLE to the
5423 disassembler output created from FMT and ARGS. The STYLER object holds
5424 any required state. Must return a pointer to a string (created from FMT
5425 and ARGS) that will continue to be valid until the complete disassembled
5426 instruction has been printed.
5427
5428 We don't currently add any styling to the output of the disassembler as
5429 used within assembler error messages, and so STYLE is ignored here. A
5430 new string is allocated on the obstack help within STYLER and returned
5431 to the caller. */
5432
5433 static const char *aarch64_apply_style
5434 (struct aarch64_styler *styler,
5435 enum disassembler_style style ATTRIBUTE_UNUSED,
5436 const char *fmt, va_list args)
5437 {
5438 int res;
5439 char *ptr;
5440 struct obstack *stack = (struct obstack *) styler->state;
5441 va_list ap;
5442
5443 /* Calculate the required space. */
5444 va_copy (ap, args);
5445 res = vsnprintf (NULL, 0, fmt, ap);
5446 va_end (ap);
5447 gas_assert (res >= 0);
5448
5449 /* Allocate space on the obstack and format the result. */
5450 ptr = (char *) obstack_alloc (stack, res + 1);
5451 res = vsnprintf (ptr, (res + 1), fmt, args);
5452 gas_assert (res >= 0);
5453
5454 return ptr;
5455 }
5456
/* Print operands for the diagnosis purpose.  The textual form of each
   operand of OPCODE/OPNDS is appended to BUF, comma-separated; BUF must
   already hold a NUL-terminated string with room for the result.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Styled fragments are allocated on CONTENT and freed in bulk below.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  obstack_free (&content, NULL);
}
5508
/* Send to stderr a string as information, prefixed with the current
   file name and line number when available.  */

static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file = as_where (&line);
  va_list args;

  if (file != NULL)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
5532
/* See if the AARCH64_OPDE_SYNTAX_ERROR error described by DETAIL
   relates to registers or register lists.  If so, return a string that
   reports the error against "operand %d", otherwise return null.

   The data fields carry accumulated syntax-error flag sets (they are
   ORed together across records in output_operand_error_report):
     data[0].i  flags for register types acceptable outside a register
		list, possibly mixed with SEF_DEFAULT_ERROR;
     data[1].i  flags for register types acceptable inside a register
		list, plus SEF_IN_REGLIST if parsing had entered a list;
     data[2].i  flags for the register type that was actually found,
		if any.  */

static const char *
get_reg_error_message (const aarch64_operand_error *detail)
{
  /* Handle the case where we found a register that was expected
     to be in a register list outside of a register list.  */
  if ((detail->data[1].i & detail->data[2].i) != 0
      && (detail->data[1].i & SEF_IN_REGLIST) == 0)
    return _("missing braces at operand %d");

  /* If some opcodes expected a register, and we found a register,
     complain about the difference.  */
  if (detail->data[2].i)
    {
      /* Prefer the in-list expectations once we are inside a list.  */
      unsigned int expected = (detail->data[1].i & SEF_IN_REGLIST
			       ? detail->data[1].i & ~SEF_IN_REGLIST
			       : detail->data[0].i & ~SEF_DEFAULT_ERROR);
      const char *msg = get_reg_expected_msg (expected, detail->data[2].i);
      if (!msg)
	msg = N_("unexpected register type at operand %d");
      return msg;
    }

  /* Handle the case where we got to the point of trying to parse a
     register within a register list, but didn't find a known register.  */
  if (detail->data[1].i & SEF_IN_REGLIST)
    {
      unsigned int expected = detail->data[1].i & ~SEF_IN_REGLIST;
      const char *msg = get_reg_expected_msg (expected, 0);
      if (!msg)
	msg = _("invalid register list at operand %d");
      return msg;
    }

  /* Punt if register-related problems weren't the only errors.  */
  if (detail->data[0].i & SEF_DEFAULT_ERROR)
    return NULL;

  /* Handle the case where the only acceptable things are registers.  */
  if (detail->data[1].i == 0)
    {
      const char *msg = get_reg_expected_msg (detail->data[0].i, 0);
      if (!msg)
	msg = _("expected a register at operand %d");
      return msg;
    }

  /* Handle the case where the only acceptable things are register lists,
     and there was no opening '{'.  */
  if (detail->data[0].i == 0)
    return _("expected '{' at operand %d");

  return _("expected a register or register list at operand %d");
}
5590
/* Output one operand error record.

   RECORD is one diagnostic collected while matching the assembly line
   STR against an opcode template.  Emit a message appropriate to
   RECORD's kind, via as_warn for non-fatal records and as_bad
   otherwise.  For AARCH64_OPDE_INVALID_VARIANT in verbose mode, also
   re-parse the line and print suggested corrected variants.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* Operand type of the offending operand, if an index was recorded.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;
  const char *msg = detail->error;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
      /* Prefer a register-specific message when one can be derived
	 from the accumulated register-expectation flags.  */
      if (!msg && idx >= 0)
	{
	  msg = get_reg_error_message (detail);
	  if (msg)
	    {
	      char *full_msg = xasprintf (msg, idx + 1);
	      handler (_("%s -- `%s'"), full_msg, str);
	      free (full_msg);
	      break;
	    }
	}
      /* Fall through.  */

    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (msg != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), msg, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     msg, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_(" did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_(" %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_(" other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes have much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_(" %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_INVALID_REGNO:
      handler (_("%s%d-%s%d expected at operand %d -- `%s'"),
	       detail->data[0].s, detail->data[1].i,
	       detail->data[0].s, detail->data[2].i, idx + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0].i/data[1].i are the inclusive lower/upper bounds; equal
	 bounds mean exactly one value is acceptable.  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 msg ? msg : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 msg ? msg : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_INVALID_VG_SIZE:
      if (detail->data[0].i == 0)
	handler (_("unexpected vector group size at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("operand %d must have a vector group size of %d -- `%s'"),
		 idx + 1, detail->data[0].i, str);
      break;

    case AARCH64_OPDE_REG_LIST_LENGTH:
      /* data[0].i is a bitmask of acceptable list lengths: bit N set
	 means a list of N registers is allowed.  */
      if (detail->data[0].i == (1 << 1))
	handler (_("expected a single-register list at operand %d -- `%s'"),
		 idx + 1, str);
      else if ((detail->data[0].i & -detail->data[0].i) == detail->data[0].i)
	/* Exactly one length bit is set.  */
	handler (_("expected a list of %d registers at operand %d -- `%s'"),
		 get_log2 (detail->data[0].i), idx + 1, str);
      else if (detail->data[0].i == 0x14)
	/* Bits 2 and 4: a list of 2 or 4 registers.  */
	handler (_("expected a list of %d or %d registers at"
		   " operand %d -- `%s'"),
		 2, 4, idx + 1, str);
      else
	handler (_("invalid number of registers in the list"
		   " at operand %d -- `%s'"), idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST_STRIDE:
      /* data[0].i is a bitmask of acceptable strides: bit N set means
	 stride N is allowed.  */
      if (detail->data[0].i == (1 << 1))
	handler (_("the register list must have a stride of %d"
		   " at operand %d -- `%s'"), 1, idx + 1, str);
      else if (detail->data[0].i == 0x12 || detail->data[0].i == 0x102)
	/* 0x12 = strides 1 or 4; 0x102 = strides 1 or 8.  */
	handler (_("the register list must have a stride of %d or %d"
		   " at operand %d -- `%s`"), 1,
		 detail->data[0].i == 0x12 ? 4 : 8, idx + 1, str);
      else
	handler (_("invalid register stride at operand %d -- `%s'"),
		 idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5833
5834 /* Return true if the presence of error A against an instruction means
5835 that error B should not be reported. This is only used as a first pass,
5836 to pick the kind of error that we should report. */
5837
5838 static bool
5839 better_error_p (operand_error_record *a, operand_error_record *b)
5840 {
5841 /* For errors reported during parsing, prefer errors that relate to
5842 later operands, since that implies that the earlier operands were
5843 syntactically valid.
5844
5845 For example, if we see a register R instead of an immediate in
5846 operand N, we'll report that as a recoverable "immediate operand
5847 required" error. This is because there is often another opcode
5848 entry that accepts a register operand N, and any errors about R
5849 should be reported against the register forms of the instruction.
5850 But if no such register form exists, the recoverable error should
5851 still win over a syntax error against operand N-1.
5852
5853 For these purposes, count an error reported at the end of the
5854 assembly string as equivalent to an error reported against the
5855 final operand. This means that opcode entries that expect more
5856 operands win over "unexpected characters following instruction". */
5857 if (a->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR
5858 && b->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR)
5859 {
5860 int a_index = (a->detail.index < 0
5861 ? aarch64_num_of_operands (a->opcode) - 1
5862 : a->detail.index);
5863 int b_index = (b->detail.index < 0
5864 ? aarch64_num_of_operands (b->opcode) - 1
5865 : b->detail.index);
5866 if (a_index != b_index)
5867 return a_index > b_index;
5868 }
5869 return operand_error_higher_severity_p (a->detail.kind, b->detail.kind);
5870 }
5871
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* First pass: find the error kind of the highest severity, using
     better_error_p to arbitrate between records.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      if (curr->detail.kind == AARCH64_OPDE_SYNTAX_ERROR)
	{
	  DEBUG_TRACE ("\t%s [%x, %x, %x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i, curr->detail.data[1].i,
		       curr->detail.data[2].i);
	}
      else if (curr->detail.kind == AARCH64_OPDE_REG_LIST_LENGTH
	       || curr->detail.kind == AARCH64_OPDE_REG_LIST_STRIDE)
	{
	  DEBUG_TRACE ("\t%s [%x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i);
	}
      else
	{
	  DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
	}
      if ((!non_fatal_only || curr->detail.non_fatal)
	  && (!record || better_error_p (curr, record)))
	record = curr;
    }

  kind = (record ? record->detail.kind : AARCH64_OPDE_NIL);
  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Second pass: pick up one of errors of KIND to report, merging flag
     data from records that tie on the same operand index.  */
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (!record || curr->detail.index > record->detail.index)
	record = curr;
      else if (curr->detail.index == record->detail.index
	       && !record->detail.error)
	{
	  if (curr->detail.error)
	    record = curr;
	  else if (kind == AARCH64_OPDE_SYNTAX_ERROR)
	    {
	      /* Accumulate the register-expectation flags so the final
		 message reflects every template that was tried.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      record->detail.data[1].i |= curr->detail.data[1].i;
	      record->detail.data[2].i |= curr->detail.data[2].i;
	      DEBUG_TRACE ("\t--> %s [%x, %x, %x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i, curr->detail.data[1].i,
			   curr->detail.data[2].i);
	    }
	  else if (kind == AARCH64_OPDE_REG_LIST_LENGTH
		   || kind == AARCH64_OPDE_REG_LIST_STRIDE)
	    {
	      /* Likewise accumulate the acceptable length/stride mask.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      DEBUG_TRACE ("\t--> %s [%x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i);
	    }
	  /* Pick the variant with the closest match.  */
	  else if (kind == AARCH64_OPDE_INVALID_VARIANT
		   && record->detail.data[0].i > curr->detail.data[0].i)
	    record = curr;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (record);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
6007 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int byte;

  /* Emit the least-significant byte first.  */
  for (byte = 0; byte < 4; byte++)
    p[byte] = (insn >> (byte * 8)) & 0xffu;
}
6018
/* Read a little-endian AARCH64 instruction from BUF, returning it as a
   host-order 32-bit value.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  const unsigned char *p = (const unsigned char *) buf;
  uint32_t insn = 0;
  int byte;

  /* Fold the bytes in from the most-significant end down.  */
  for (byte = 3; byte >= 0; byte--)
    insn = (insn << 8) | p[byte];

  return insn;
}
6028
/* Emit the instruction currently held in the global INST into the
   output: reserve INSN_SIZE bytes in the current frag, store the
   encoded value, and record a fix-up if the instruction carries a
   relocation.  NEW_INST, if non-null, is attached to the fix-up so the
   encoded instruction is available when the fix is applied.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  to = frag_more (INSN_SIZE);

  /* Mark this frag as containing instructions (used by mapping-symbol
     bookkeeping in tc_frag_data).  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand and flags so md_apply_fix
	     can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
6062
/* Link together opcodes of the same name.  */

struct templates
{
  /* One opcode entry carrying this mnemonic.  */
  const aarch64_opcode *opcode;
  /* Next entry sharing the same mnemonic, or NULL at the end of the
     chain.  */
  struct templates *next;
};

typedef struct templates templates;
6072
6073 static templates *
6074 lookup_mnemonic (const char *start, int len)
6075 {
6076 templates *templ = NULL;
6077
6078 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
6079 return templ;
6080 }
6081
6082 /* Subroutine of md_assemble, responsible for looking up the primary
6083 opcode from the mnemonic the user wrote. BASE points to the beginning
6084 of the mnemonic, DOT points to the first '.' within the mnemonic
6085 (if any) and END points to the end of the mnemonic. */
6086
6087 static templates *
6088 opcode_lookup (char *base, char *dot, char *end)
6089 {
6090 const aarch64_cond *cond;
6091 char condname[16];
6092 int len;
6093
6094 if (dot == end)
6095 return 0;
6096
6097 inst.cond = COND_ALWAYS;
6098
6099 /* Handle a possible condition. */
6100 if (dot)
6101 {
6102 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
6103 if (!cond)
6104 return 0;
6105 inst.cond = cond->value;
6106 len = dot - base;
6107 }
6108 else
6109 len = end - base;
6110
6111 if (inst.cond == COND_ALWAYS)
6112 {
6113 /* Look for unaffixed mnemonic. */
6114 return lookup_mnemonic (base, len);
6115 }
6116 else if (len <= 13)
6117 {
6118 /* append ".c" to mnemonic if conditional */
6119 memcpy (condname, base, len);
6120 memcpy (condname + len, ".c", 2);
6121 base = condname;
6122 len += 2;
6123 return lookup_mnemonic (base, len);
6124 }
6125
6126 return NULL;
6127 }
6128
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* The opcode entry encodes the value an omitted operand defaults to.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default value is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;
    /* Pair registers default to the register after the preceding
       operand; XZR (0x1f) pairs with XZR rather than with 0x20.  */
    case AARCH64_OPND_PAIRREG_OR_XZR:
      if (inst.base.operands[idx - 1].reg.regno == 0x1f)
	{
	  operand->reg.regno = 0x1f;
	  break;
	}
      operand->reg.regno = inst.base.operands[idx - 1].reg.regno + 1;
      break;
    case AARCH64_OPND_PAIRREG:
      operand->reg.regno = inst.base.operands[idx - 1].reg.regno + 1;
      break;

    /* Vector-element operands: the default value is a register number
       for the reglane.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate-like operands: the default value is the immediate.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* An omitted scale defaults to "MUL #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Option operands: the default value indexes the option table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6238
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   Rejects relocation types that are not valid for MOVK, rejects G2/G3
   relocations on 32-bit destinations, and derives the implicit shift
   amount (0/16/32/48 for the G0/G1/G2/G3 groups) for operand 1.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not zero/sign-extend, so the signed and PC-relative
     group relocations (and TLS ones that behave likewise) are
     disallowed for it.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  switch (inst.reloc.type)
    {
    /* G0 group: bits [15:0], shift 0.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1 group: bits [31:16], shift 16.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2 group: bits [47:32], shift 32; 64-bit destinations only.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* G3 group: bits [63:48], shift 48; 64-bit destinations only.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6340
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The concrete relocation is selected by the access size implied by
   operand 1's qualifier (8/16/32/64/128 bits) and by which of the five
   supported pseudo reloc families is in use.  Returns
   BFD_RELOC_AARCH64_NONE if the qualifier size is not valid for the
   family.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: pseudo reloc family (in the same order as the pseudo reloc
     codes, see below); columns: log2 of the access size in bytes.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1's qualifier is still undetermined, infer it from
     operand 0's.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_log2 (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS families have no 128-bit variant.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6428
6429 /* Check whether a register list REGINFO is valid. The registers have type
6430 REG_TYPE and must be numbered in increasing order (modulo the register
6431 bank size). They must have a consistent stride.
6432
6433 Return true if the list is valid, describing it in LIST if so. */
6434
6435 static bool
6436 reg_list_valid_p (uint32_t reginfo, struct aarch64_reglist *list,
6437 aarch64_reg_type reg_type)
6438 {
6439 uint32_t i, nb_regs, prev_regno, incr, mask;
6440 mask = reg_type_mask (reg_type);
6441
6442 nb_regs = 1 + (reginfo & 0x3);
6443 reginfo >>= 2;
6444 prev_regno = reginfo & 0x1f;
6445 incr = 1;
6446
6447 list->first_regno = prev_regno;
6448 list->num_regs = nb_regs;
6449
6450 for (i = 1; i < nb_regs; ++i)
6451 {
6452 uint32_t curr_regno, curr_incr;
6453 reginfo >>= 5;
6454 curr_regno = reginfo & 0x1f;
6455 curr_incr = (curr_regno - prev_regno) & mask;
6456 if (curr_incr == 0)
6457 return false;
6458 else if (i == 1)
6459 incr = curr_incr;
6460 else if (curr_incr != incr)
6461 return false;
6462 prev_regno = curr_regno;
6463 }
6464
6465 list->stride = incr;
6466 return true;
6467 }
6468
6469 /* Generic instruction operand parser. This does no encoding and no
6470 semantic validation; it merely squirrels values away in the inst
6471 structure. Returns TRUE or FALSE depending on whether the
6472 specified grammar matched. */
6473
6474 static bool
6475 parse_operands (char *str, const aarch64_opcode *opcode)
6476 {
6477 int i;
6478 char *backtrack_pos = 0;
6479 const enum aarch64_opnd *operands = opcode->operands;
6480 const uint64_t flags = opcode->flags;
6481 aarch64_reg_type imm_reg_type;
6482
6483 clear_error ();
6484 skip_whitespace (str);
6485
6486 if (AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SME2))
6487 imm_reg_type = REG_TYPE_R_ZR_SP_BHSDQ_VZP_PN;
6488 else if (AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE)
6489 || AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE2))
6490 imm_reg_type = REG_TYPE_R_ZR_SP_BHSDQ_VZP;
6491 else
6492 imm_reg_type = REG_TYPE_R_ZR_BHSDQ_V;
6493
6494 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6495 {
6496 int64_t val;
6497 const reg_entry *reg;
6498 int comma_skipped_p = 0;
6499 struct vector_type_el vectype;
6500 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6501 aarch64_opnd_info *info = &inst.base.operands[i];
6502 aarch64_reg_type reg_type;
6503
6504 DEBUG_TRACE ("parse operand %d", i);
6505
6506 /* Assign the operand code. */
6507 info->type = operands[i];
6508
6509 if (optional_operand_p (opcode, i))
6510 {
6511 /* Remember where we are in case we need to backtrack. */
6512 gas_assert (!backtrack_pos);
6513 backtrack_pos = str;
6514 }
6515
6516 /* Expect comma between operands; the backtrack mechanism will take
6517 care of cases of omitted optional operand. */
6518 if (i > 0 && ! skip_past_char (&str, ','))
6519 {
6520 set_syntax_error (_("comma expected between operands"));
6521 goto failure;
6522 }
6523 else
6524 comma_skipped_p = 1;
6525
6526 switch (operands[i])
6527 {
6528 case AARCH64_OPND_Rd:
6529 case AARCH64_OPND_Rn:
6530 case AARCH64_OPND_Rm:
6531 case AARCH64_OPND_Rt:
6532 case AARCH64_OPND_Rt2:
6533 case AARCH64_OPND_X16:
6534 case AARCH64_OPND_Rs:
6535 case AARCH64_OPND_Ra:
6536 case AARCH64_OPND_Rt_LS64:
6537 case AARCH64_OPND_Rt_SYS:
6538 case AARCH64_OPND_PAIRREG:
6539 case AARCH64_OPND_PAIRREG_OR_XZR:
6540 case AARCH64_OPND_SVE_Rm:
6541 po_int_fp_reg_or_fail (REG_TYPE_R_ZR);
6542
6543 /* In LS64 load/store instructions Rt register number must be even
6544 and <=22. */
6545 if (operands[i] == AARCH64_OPND_Rt_LS64)
6546 {
6547 /* We've already checked if this is valid register.
6548 This will check if register number (Rt) is not undefined for
6549 LS64 instructions:
6550 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6551 if ((info->reg.regno & 0x18) == 0x18
6552 || (info->reg.regno & 0x01) == 0x01)
6553 {
6554 set_syntax_error
6555 (_("invalid Rt register number in 64-byte load/store"));
6556 goto failure;
6557 }
6558 }
6559 else if (operands[i] == AARCH64_OPND_X16)
6560 {
6561 if (info->reg.regno != 16)
6562 {
6563 goto failure;
6564 }
6565 }
6566 break;
6567
6568 case AARCH64_OPND_Rd_SP:
6569 case AARCH64_OPND_Rn_SP:
6570 case AARCH64_OPND_Rt_SP:
6571 case AARCH64_OPND_SVE_Rn_SP:
6572 case AARCH64_OPND_Rm_SP:
6573 po_int_fp_reg_or_fail (REG_TYPE_R_SP);
6574 break;
6575
6576 case AARCH64_OPND_Rm_EXT:
6577 case AARCH64_OPND_Rm_SFT:
6578 po_misc_or_fail (parse_shifter_operand
6579 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6580 ? SHIFTED_ARITH_IMM
6581 : SHIFTED_LOGIC_IMM)));
6582 if (!info->shifter.operator_present)
6583 {
6584 /* Default to LSL if not present. Libopcodes prefers shifter
6585 kind to be explicit. */
6586 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6587 info->shifter.kind = AARCH64_MOD_LSL;
6588 /* For Rm_EXT, libopcodes will carry out further check on whether
6589 or not stack pointer is used in the instruction (Recall that
6590 "the extend operator is not optional unless at least one of
6591 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6592 }
6593 break;
6594
6595 case AARCH64_OPND_Fd:
6596 case AARCH64_OPND_Fn:
6597 case AARCH64_OPND_Fm:
6598 case AARCH64_OPND_Fa:
6599 case AARCH64_OPND_Ft:
6600 case AARCH64_OPND_Ft2:
6601 case AARCH64_OPND_Sd:
6602 case AARCH64_OPND_Sn:
6603 case AARCH64_OPND_Sm:
6604 case AARCH64_OPND_SVE_VZn:
6605 case AARCH64_OPND_SVE_Vd:
6606 case AARCH64_OPND_SVE_Vm:
6607 case AARCH64_OPND_SVE_Vn:
6608 po_int_fp_reg_or_fail (REG_TYPE_BHSDQ);
6609 break;
6610
6611 case AARCH64_OPND_SVE_Pd:
6612 case AARCH64_OPND_SVE_Pg3:
6613 case AARCH64_OPND_SVE_Pg4_5:
6614 case AARCH64_OPND_SVE_Pg4_10:
6615 case AARCH64_OPND_SVE_Pg4_16:
6616 case AARCH64_OPND_SVE_Pm:
6617 case AARCH64_OPND_SVE_Pn:
6618 case AARCH64_OPND_SVE_Pt:
6619 case AARCH64_OPND_SME_Pm:
6620 reg_type = REG_TYPE_P;
6621 goto vector_reg;
6622
6623 case AARCH64_OPND_SVE_Za_5:
6624 case AARCH64_OPND_SVE_Za_16:
6625 case AARCH64_OPND_SVE_Zd:
6626 case AARCH64_OPND_SVE_Zm_5:
6627 case AARCH64_OPND_SVE_Zm_16:
6628 case AARCH64_OPND_SVE_Zn:
6629 case AARCH64_OPND_SVE_Zt:
6630 case AARCH64_OPND_SME_Zm:
6631 reg_type = REG_TYPE_Z;
6632 goto vector_reg;
6633
6634 case AARCH64_OPND_SVE_PNd:
6635 case AARCH64_OPND_SVE_PNg4_10:
6636 case AARCH64_OPND_SVE_PNn:
6637 case AARCH64_OPND_SVE_PNt:
6638 case AARCH64_OPND_SME_PNd3:
6639 case AARCH64_OPND_SME_PNg3:
6640 case AARCH64_OPND_SME_PNn:
6641 reg_type = REG_TYPE_PN;
6642 goto vector_reg;
6643
6644 case AARCH64_OPND_Va:
6645 case AARCH64_OPND_Vd:
6646 case AARCH64_OPND_Vn:
6647 case AARCH64_OPND_Vm:
6648 reg_type = REG_TYPE_V;
6649 vector_reg:
6650 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6651 if (!reg)
6652 goto failure;
6653 if (vectype.defined & NTA_HASINDEX)
6654 goto failure;
6655
6656 info->reg.regno = reg->number;
6657 if ((reg_type == REG_TYPE_P
6658 || reg_type == REG_TYPE_PN
6659 || reg_type == REG_TYPE_Z)
6660 && vectype.type == NT_invtype)
6661 /* Unqualified P and Z registers are allowed in certain
6662 contexts. Rely on F_STRICT qualifier checking to catch
6663 invalid uses. */
6664 info->qualifier = AARCH64_OPND_QLF_NIL;
6665 else
6666 {
6667 info->qualifier = vectype_to_qualifier (&vectype);
6668 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6669 goto failure;
6670 }
6671 break;
6672
6673 case AARCH64_OPND_VdD1:
6674 case AARCH64_OPND_VnD1:
6675 reg = aarch64_reg_parse (&str, REG_TYPE_V, &vectype);
6676 if (!reg)
6677 goto failure;
6678 if (vectype.type != NT_d || vectype.index != 1)
6679 {
6680 set_fatal_syntax_error
6681 (_("the top half of a 128-bit FP/SIMD register is expected"));
6682 goto failure;
6683 }
6684 info->reg.regno = reg->number;
6685 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6686 here; it is correct for the purpose of encoding/decoding since
6687 only the register number is explicitly encoded in the related
6688 instructions, although this appears a bit hacky. */
6689 info->qualifier = AARCH64_OPND_QLF_S_D;
6690 break;
6691
6692 case AARCH64_OPND_SVE_Zm3_INDEX:
6693 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6694 case AARCH64_OPND_SVE_Zm3_19_INDEX:
6695 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6696 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6697 case AARCH64_OPND_SVE_Zm4_INDEX:
6698 case AARCH64_OPND_SVE_Zn_INDEX:
6699 case AARCH64_OPND_SME_Zm_INDEX1:
6700 case AARCH64_OPND_SME_Zm_INDEX2:
6701 case AARCH64_OPND_SME_Zm_INDEX3_1:
6702 case AARCH64_OPND_SME_Zm_INDEX3_2:
6703 case AARCH64_OPND_SME_Zm_INDEX3_10:
6704 case AARCH64_OPND_SME_Zm_INDEX4_1:
6705 case AARCH64_OPND_SME_Zm_INDEX4_10:
6706 case AARCH64_OPND_SME_Zn_INDEX1_16:
6707 case AARCH64_OPND_SME_Zn_INDEX2_15:
6708 case AARCH64_OPND_SME_Zn_INDEX2_16:
6709 case AARCH64_OPND_SME_Zn_INDEX3_14:
6710 case AARCH64_OPND_SME_Zn_INDEX3_15:
6711 case AARCH64_OPND_SME_Zn_INDEX4_14:
6712 reg_type = REG_TYPE_Z;
6713 goto vector_reg_index;
6714
6715 case AARCH64_OPND_Ed:
6716 case AARCH64_OPND_En:
6717 case AARCH64_OPND_Em:
6718 case AARCH64_OPND_Em16:
6719 case AARCH64_OPND_SM3_IMM2:
6720 reg_type = REG_TYPE_V;
6721 vector_reg_index:
6722 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6723 if (!reg)
6724 goto failure;
6725 if (!(vectype.defined & NTA_HASINDEX))
6726 goto failure;
6727
6728 if (reg->type == REG_TYPE_Z && vectype.type == NT_invtype)
6729 /* Unqualified Zn[index] is allowed in LUTI2 instructions. */
6730 info->qualifier = AARCH64_OPND_QLF_NIL;
6731 else
6732 {
6733 if (vectype.type == NT_invtype)
6734 goto failure;
6735 info->qualifier = vectype_to_qualifier (&vectype);
6736 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6737 goto failure;
6738 }
6739
6740 info->reglane.regno = reg->number;
6741 info->reglane.index = vectype.index;
6742 break;
6743
6744 case AARCH64_OPND_SVE_ZnxN:
6745 case AARCH64_OPND_SVE_ZtxN:
6746 case AARCH64_OPND_SME_Zdnx2:
6747 case AARCH64_OPND_SME_Zdnx4:
6748 case AARCH64_OPND_SME_Zmx2:
6749 case AARCH64_OPND_SME_Zmx4:
6750 case AARCH64_OPND_SME_Znx2:
6751 case AARCH64_OPND_SME_Znx4:
6752 case AARCH64_OPND_SME_Ztx2_STRIDED:
6753 case AARCH64_OPND_SME_Ztx4_STRIDED:
6754 reg_type = REG_TYPE_Z;
6755 goto vector_reg_list;
6756
6757 case AARCH64_OPND_SME_Pdx2:
6758 case AARCH64_OPND_SME_PdxN:
6759 reg_type = REG_TYPE_P;
6760 goto vector_reg_list;
6761
6762 case AARCH64_OPND_LVn:
6763 case AARCH64_OPND_LVt:
6764 case AARCH64_OPND_LVt_AL:
6765 case AARCH64_OPND_LEt:
6766 reg_type = REG_TYPE_V;
6767 vector_reg_list:
6768 if (reg_type == REG_TYPE_Z
6769 && get_opcode_dependent_value (opcode) == 1
6770 && *str != '{')
6771 {
6772 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6773 if (!reg)
6774 goto failure;
6775 info->reglist.first_regno = reg->number;
6776 info->reglist.num_regs = 1;
6777 info->reglist.stride = 1;
6778 }
6779 else
6780 {
6781 val = parse_vector_reg_list (&str, reg_type, &vectype);
6782 if (val == PARSE_FAIL)
6783 goto failure;
6784
6785 if (! reg_list_valid_p (val, &info->reglist, reg_type))
6786 {
6787 set_fatal_syntax_error (_("invalid register list"));
6788 goto failure;
6789 }
6790
6791 if ((int) vectype.width > 0 && *str != ',')
6792 {
6793 set_fatal_syntax_error
6794 (_("expected element type rather than vector type"));
6795 goto failure;
6796 }
6797 }
6798 if (operands[i] == AARCH64_OPND_LEt)
6799 {
6800 if (!(vectype.defined & NTA_HASINDEX))
6801 goto failure;
6802 info->reglist.has_index = 1;
6803 info->reglist.index = vectype.index;
6804 }
6805 else
6806 {
6807 if (vectype.defined & NTA_HASINDEX)
6808 goto failure;
6809 if (!(vectype.defined & NTA_HASTYPE))
6810 {
6811 if (reg_type == REG_TYPE_Z || reg_type == REG_TYPE_P)
6812 set_fatal_syntax_error (_("missing type suffix"));
6813 goto failure;
6814 }
6815 }
6816 info->qualifier = vectype_to_qualifier (&vectype);
6817 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6818 goto failure;
6819 break;
6820
6821 case AARCH64_OPND_CRn:
6822 case AARCH64_OPND_CRm:
6823 {
6824 char prefix = *(str++);
6825 if (prefix != 'c' && prefix != 'C')
6826 goto failure;
6827
6828 po_imm_nc_or_fail ();
6829 if (flags & F_OPD_NARROW)
6830 {
6831 if ((operands[i] == AARCH64_OPND_CRn)
6832 && (val < 8 || val > 9))
6833 {
6834 set_fatal_syntax_error (_(N_ ("C8 - C9 expected")));
6835 goto failure;
6836 }
6837 else if ((operands[i] == AARCH64_OPND_CRm)
6838 && (val > 7))
6839 {
6840 set_fatal_syntax_error (_(N_ ("C0 - C7 expected")));
6841 goto failure;
6842 }
6843 }
6844 else if (val > 15)
6845 {
6846 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6847 goto failure;
6848 }
6849 info->qualifier = AARCH64_OPND_QLF_CR;
6850 info->imm.value = val;
6851 break;
6852 }
6853
6854 case AARCH64_OPND_SHLL_IMM:
6855 case AARCH64_OPND_IMM_VLSR:
6856 po_imm_or_fail (1, 64);
6857 info->imm.value = val;
6858 break;
6859
6860 case AARCH64_OPND_CCMP_IMM:
6861 case AARCH64_OPND_SIMM5:
6862 case AARCH64_OPND_FBITS:
6863 case AARCH64_OPND_TME_UIMM16:
6864 case AARCH64_OPND_UIMM4:
6865 case AARCH64_OPND_UIMM4_ADDG:
6866 case AARCH64_OPND_UIMM10:
6867 case AARCH64_OPND_UIMM3_OP1:
6868 case AARCH64_OPND_UIMM3_OP2:
6869 case AARCH64_OPND_IMM_VLSL:
6870 case AARCH64_OPND_IMM:
6871 case AARCH64_OPND_IMM_2:
6872 case AARCH64_OPND_WIDTH:
6873 case AARCH64_OPND_SVE_INV_LIMM:
6874 case AARCH64_OPND_SVE_LIMM:
6875 case AARCH64_OPND_SVE_LIMM_MOV:
6876 case AARCH64_OPND_SVE_SHLIMM_PRED:
6877 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6878 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6879 case AARCH64_OPND_SME_SHRIMM4:
6880 case AARCH64_OPND_SME_SHRIMM5:
6881 case AARCH64_OPND_SVE_SHRIMM_PRED:
6882 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6883 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6884 case AARCH64_OPND_SVE_SIMM5:
6885 case AARCH64_OPND_SVE_SIMM5B:
6886 case AARCH64_OPND_SVE_SIMM6:
6887 case AARCH64_OPND_SVE_SIMM8:
6888 case AARCH64_OPND_SVE_UIMM3:
6889 case AARCH64_OPND_SVE_UIMM7:
6890 case AARCH64_OPND_SVE_UIMM8:
6891 case AARCH64_OPND_SVE_UIMM8_53:
6892 case AARCH64_OPND_IMM_ROT1:
6893 case AARCH64_OPND_IMM_ROT2:
6894 case AARCH64_OPND_IMM_ROT3:
6895 case AARCH64_OPND_SVE_IMM_ROT1:
6896 case AARCH64_OPND_SVE_IMM_ROT2:
6897 case AARCH64_OPND_SVE_IMM_ROT3:
6898 case AARCH64_OPND_CSSC_SIMM8:
6899 case AARCH64_OPND_CSSC_UIMM8:
6900 po_imm_nc_or_fail ();
6901 info->imm.value = val;
6902 break;
6903
6904 case AARCH64_OPND_SVE_AIMM:
6905 case AARCH64_OPND_SVE_ASIMM:
6906 po_imm_nc_or_fail ();
6907 info->imm.value = val;
6908 skip_whitespace (str);
6909 if (skip_past_comma (&str))
6910 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6911 else
6912 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6913 break;
6914
6915 case AARCH64_OPND_SVE_PATTERN:
6916 po_enum_or_fail (aarch64_sve_pattern_array);
6917 info->imm.value = val;
6918 break;
6919
6920 case AARCH64_OPND_SVE_PATTERN_SCALED:
6921 po_enum_or_fail (aarch64_sve_pattern_array);
6922 info->imm.value = val;
6923 if (skip_past_comma (&str)
6924 && !parse_shift (&str, info, SHIFTED_MUL))
6925 goto failure;
6926 if (!info->shifter.operator_present)
6927 {
6928 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6929 info->shifter.kind = AARCH64_MOD_MUL;
6930 info->shifter.amount = 1;
6931 }
6932 break;
6933
6934 case AARCH64_OPND_SVE_PRFOP:
6935 po_enum_or_fail (aarch64_sve_prfop_array);
6936 info->imm.value = val;
6937 break;
6938
6939 case AARCH64_OPND_UIMM7:
6940 po_imm_or_fail (0, 127);
6941 info->imm.value = val;
6942 break;
6943
6944 case AARCH64_OPND_IDX:
6945 case AARCH64_OPND_MASK:
6946 case AARCH64_OPND_BIT_NUM:
6947 case AARCH64_OPND_IMMR:
6948 case AARCH64_OPND_IMMS:
6949 po_imm_or_fail (0, 63);
6950 info->imm.value = val;
6951 break;
6952
6953 case AARCH64_OPND_IMM0:
6954 po_imm_nc_or_fail ();
6955 if (val != 0)
6956 {
6957 set_fatal_syntax_error (_("immediate zero expected"));
6958 goto failure;
6959 }
6960 info->imm.value = 0;
6961 break;
6962
6963 case AARCH64_OPND_FPIMM0:
6964 {
6965 int qfloat;
6966 bool res1 = false, res2 = false;
6967 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6968 it is probably not worth the effort to support it. */
6969 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6970 imm_reg_type))
6971 && (error_p ()
6972 || !(res2 = parse_constant_immediate (&str, &val,
6973 imm_reg_type))))
6974 goto failure;
6975 if ((res1 && qfloat == 0) || (res2 && val == 0))
6976 {
6977 info->imm.value = 0;
6978 info->imm.is_fp = 1;
6979 break;
6980 }
6981 set_fatal_syntax_error (_("immediate zero expected"));
6982 goto failure;
6983 }
6984
6985 case AARCH64_OPND_IMM_MOV:
6986 {
6987 char *saved = str;
6988 if (reg_name_p (str, REG_TYPE_R_ZR_SP)
6989 || reg_name_p (str, REG_TYPE_V))
6990 goto failure;
6991 str = saved;
6992 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6993 GE_OPT_PREFIX, REJECT_ABSENT));
6994 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6995 later. fix_mov_imm_insn will try to determine a machine
6996 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6997 message if the immediate cannot be moved by a single
6998 instruction. */
6999 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7000 inst.base.operands[i].skip = 1;
7001 }
7002 break;
7003
7004 case AARCH64_OPND_SIMD_IMM:
7005 case AARCH64_OPND_SIMD_IMM_SFT:
7006 if (! parse_big_immediate (&str, &val, imm_reg_type))
7007 goto failure;
7008 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7009 /* addr_off_p */ 0,
7010 /* need_libopcodes_p */ 1,
7011 /* skip_p */ 1);
7012 /* Parse shift.
7013 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
7014 shift, we don't check it here; we leave the checking to
7015 the libopcodes (operand_general_constraint_met_p). By
7016 doing this, we achieve better diagnostics. */
7017 if (skip_past_comma (&str)
7018 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
7019 goto failure;
7020 if (!info->shifter.operator_present
7021 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
7022 {
7023 /* Default to LSL if not present. Libopcodes prefers shifter
7024 kind to be explicit. */
7025 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7026 info->shifter.kind = AARCH64_MOD_LSL;
7027 }
7028 break;
7029
7030 case AARCH64_OPND_FPIMM:
7031 case AARCH64_OPND_SIMD_FPIMM:
7032 case AARCH64_OPND_SVE_FPIMM8:
7033 {
7034 int qfloat;
7035 bool dp_p;
7036
7037 dp_p = double_precision_operand_p (&inst.base.operands[0]);
7038 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
7039 || !aarch64_imm_float_p (qfloat))
7040 {
7041 if (!error_p ())
7042 set_fatal_syntax_error (_("invalid floating-point"
7043 " constant"));
7044 goto failure;
7045 }
7046 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
7047 inst.base.operands[i].imm.is_fp = 1;
7048 }
7049 break;
7050
7051 case AARCH64_OPND_SVE_I1_HALF_ONE:
7052 case AARCH64_OPND_SVE_I1_HALF_TWO:
7053 case AARCH64_OPND_SVE_I1_ZERO_ONE:
7054 {
7055 int qfloat;
7056 bool dp_p;
7057
7058 dp_p = double_precision_operand_p (&inst.base.operands[0]);
7059 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
7060 {
7061 if (!error_p ())
7062 set_fatal_syntax_error (_("invalid floating-point"
7063 " constant"));
7064 goto failure;
7065 }
7066 inst.base.operands[i].imm.value = qfloat;
7067 inst.base.operands[i].imm.is_fp = 1;
7068 }
7069 break;
7070
7071 case AARCH64_OPND_LIMM:
7072 po_misc_or_fail (parse_shifter_operand (&str, info,
7073 SHIFTED_LOGIC_IMM));
7074 if (info->shifter.operator_present)
7075 {
7076 set_fatal_syntax_error
7077 (_("shift not allowed for bitmask immediate"));
7078 goto failure;
7079 }
7080 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7081 /* addr_off_p */ 0,
7082 /* need_libopcodes_p */ 1,
7083 /* skip_p */ 1);
7084 break;
7085
7086 case AARCH64_OPND_AIMM:
7087 if (opcode->op == OP_ADD)
7088 /* ADD may have relocation types. */
7089 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
7090 SHIFTED_ARITH_IMM));
7091 else
7092 po_misc_or_fail (parse_shifter_operand (&str, info,
7093 SHIFTED_ARITH_IMM));
7094 switch (inst.reloc.type)
7095 {
7096 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7097 info->shifter.amount = 12;
7098 break;
7099 case BFD_RELOC_UNUSED:
7100 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
7101 if (info->shifter.kind != AARCH64_MOD_NONE)
7102 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
7103 inst.reloc.pc_rel = 0;
7104 break;
7105 default:
7106 break;
7107 }
7108 info->imm.value = 0;
7109 if (!info->shifter.operator_present)
7110 {
7111 /* Default to LSL if not present. Libopcodes prefers shifter
7112 kind to be explicit. */
7113 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7114 info->shifter.kind = AARCH64_MOD_LSL;
7115 }
7116 break;
7117
7118 case AARCH64_OPND_HALF:
7119 {
7120 /* #<imm16> or relocation. */
7121 int internal_fixup_p;
7122 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
7123 if (internal_fixup_p)
7124 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
7125 skip_whitespace (str);
7126 if (skip_past_comma (&str))
7127 {
7128 /* {, LSL #<shift>} */
7129 if (! aarch64_gas_internal_fixup_p ())
7130 {
7131 set_fatal_syntax_error (_("can't mix relocation modifier "
7132 "with explicit shift"));
7133 goto failure;
7134 }
7135 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
7136 }
7137 else
7138 inst.base.operands[i].shifter.amount = 0;
7139 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
7140 inst.base.operands[i].imm.value = 0;
7141 if (! process_movw_reloc_info ())
7142 goto failure;
7143 }
7144 break;
7145
7146 case AARCH64_OPND_EXCEPTION:
7147 case AARCH64_OPND_UNDEFINED:
7148 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
7149 imm_reg_type));
7150 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7151 /* addr_off_p */ 0,
7152 /* need_libopcodes_p */ 0,
7153 /* skip_p */ 1);
7154 break;
7155
7156 case AARCH64_OPND_NZCV:
7157 {
7158 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
7159 if (nzcv != NULL)
7160 {
7161 str += 4;
7162 info->imm.value = nzcv->value;
7163 break;
7164 }
7165 po_imm_or_fail (0, 15);
7166 info->imm.value = val;
7167 }
7168 break;
7169
7170 case AARCH64_OPND_COND:
7171 case AARCH64_OPND_COND1:
7172 {
7173 char *start = str;
7174 do
7175 str++;
7176 while (ISALPHA (*str));
7177 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
7178 if (info->cond == NULL)
7179 {
7180 set_syntax_error (_("invalid condition"));
7181 goto failure;
7182 }
7183 else if (operands[i] == AARCH64_OPND_COND1
7184 && (info->cond->value & 0xe) == 0xe)
7185 {
7186 /* Do not allow AL or NV. */
7187 set_default_error ();
7188 goto failure;
7189 }
7190 }
7191 break;
7192
7193 case AARCH64_OPND_ADDR_ADRP:
7194 po_misc_or_fail (parse_adrp (&str));
7195 /* Clear the value as operand needs to be relocated. */
7196 info->imm.value = 0;
7197 break;
7198
7199 case AARCH64_OPND_ADDR_PCREL14:
7200 case AARCH64_OPND_ADDR_PCREL19:
7201 case AARCH64_OPND_ADDR_PCREL21:
7202 case AARCH64_OPND_ADDR_PCREL26:
7203 po_misc_or_fail (parse_address (&str, info));
7204 if (!info->addr.pcrel)
7205 {
7206 set_syntax_error (_("invalid pc-relative address"));
7207 goto failure;
7208 }
7209 if (inst.gen_lit_pool
7210 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
7211 {
7212 /* Only permit "=value" in the literal load instructions.
7213 The literal will be generated by programmer_friendly_fixup. */
7214 set_syntax_error (_("invalid use of \"=immediate\""));
7215 goto failure;
7216 }
7217 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
7218 {
7219 set_syntax_error (_("unrecognized relocation suffix"));
7220 goto failure;
7221 }
7222 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
7223 {
7224 info->imm.value = inst.reloc.exp.X_add_number;
7225 inst.reloc.type = BFD_RELOC_UNUSED;
7226 }
7227 else
7228 {
7229 info->imm.value = 0;
7230 if (inst.reloc.type == BFD_RELOC_UNUSED)
7231 switch (opcode->iclass)
7232 {
7233 case compbranch:
7234 case condbranch:
7235 /* e.g. CBZ or B.COND */
7236 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7237 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
7238 break;
7239 case testbranch:
7240 /* e.g. TBZ */
7241 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
7242 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
7243 break;
7244 case branch_imm:
7245 /* e.g. B or BL */
7246 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
7247 inst.reloc.type =
7248 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
7249 : BFD_RELOC_AARCH64_JUMP26;
7250 break;
7251 case loadlit:
7252 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7253 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
7254 break;
7255 case pcreladdr:
7256 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
7257 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
7258 break;
7259 default:
7260 gas_assert (0);
7261 abort ();
7262 }
7263 inst.reloc.pc_rel = 1;
7264 }
7265 break;
7266
7267 case AARCH64_OPND_ADDR_SIMPLE:
7268 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7269 {
7270 /* [<Xn|SP>{, #<simm>}] */
7271 char *start = str;
7272 /* First use the normal address-parsing routines, to get
7273 the usual syntax errors. */
7274 po_misc_or_fail (parse_address (&str, info));
7275 if (info->addr.pcrel || info->addr.offset.is_reg
7276 || !info->addr.preind || info->addr.postind
7277 || info->addr.writeback)
7278 {
7279 set_syntax_error (_("invalid addressing mode"));
7280 goto failure;
7281 }
7282
7283 /* Then retry, matching the specific syntax of these addresses. */
7284 str = start;
7285 po_char_or_fail ('[');
7286 po_reg_or_fail (REG_TYPE_R64_SP);
7287 /* Accept optional ", #0". */
7288 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7289 && skip_past_char (&str, ','))
7290 {
7291 skip_past_char (&str, '#');
7292 if (! skip_past_char (&str, '0'))
7293 {
7294 set_fatal_syntax_error
7295 (_("the optional immediate offset can only be 0"));
7296 goto failure;
7297 }
7298 }
7299 po_char_or_fail (']');
7300 break;
7301 }
7302
7303 case AARCH64_OPND_ADDR_REGOFF:
7304 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7305 po_misc_or_fail (parse_address (&str, info));
7306 regoff_addr:
7307 if (info->addr.pcrel || !info->addr.offset.is_reg
7308 || !info->addr.preind || info->addr.postind
7309 || info->addr.writeback)
7310 {
7311 set_syntax_error (_("invalid addressing mode"));
7312 goto failure;
7313 }
7314 if (!info->shifter.operator_present)
7315 {
7316 /* Default to LSL if not present. Libopcodes prefers shifter
7317 kind to be explicit. */
7318 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7319 info->shifter.kind = AARCH64_MOD_LSL;
7320 }
7321 /* Qualifier to be deduced by libopcodes. */
7322 break;
7323
7324 case AARCH64_OPND_ADDR_SIMM7:
7325 po_misc_or_fail (parse_address (&str, info));
7326 if (info->addr.pcrel || info->addr.offset.is_reg
7327 || (!info->addr.preind && !info->addr.postind))
7328 {
7329 set_syntax_error (_("invalid addressing mode"));
7330 goto failure;
7331 }
7332 if (inst.reloc.type != BFD_RELOC_UNUSED)
7333 {
7334 set_syntax_error (_("relocation not allowed"));
7335 goto failure;
7336 }
7337 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7338 /* addr_off_p */ 1,
7339 /* need_libopcodes_p */ 1,
7340 /* skip_p */ 0);
7341 break;
7342
7343 case AARCH64_OPND_ADDR_SIMM9:
7344 case AARCH64_OPND_ADDR_SIMM9_2:
7345 case AARCH64_OPND_ADDR_SIMM11:
7346 case AARCH64_OPND_ADDR_SIMM13:
7347 po_misc_or_fail (parse_address (&str, info));
7348 if (info->addr.pcrel || info->addr.offset.is_reg
7349 || (!info->addr.preind && !info->addr.postind)
7350 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7351 && info->addr.writeback))
7352 {
7353 set_syntax_error (_("invalid addressing mode"));
7354 goto failure;
7355 }
7356 if (inst.reloc.type != BFD_RELOC_UNUSED)
7357 {
7358 set_syntax_error (_("relocation not allowed"));
7359 goto failure;
7360 }
7361 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7362 /* addr_off_p */ 1,
7363 /* need_libopcodes_p */ 1,
7364 /* skip_p */ 0);
7365 break;
7366
7367 case AARCH64_OPND_ADDR_SIMM10:
7368 case AARCH64_OPND_ADDR_OFFSET:
7369 po_misc_or_fail (parse_address (&str, info));
7370 if (info->addr.pcrel || info->addr.offset.is_reg
7371 || !info->addr.preind || info->addr.postind)
7372 {
7373 set_syntax_error (_("invalid addressing mode"));
7374 goto failure;
7375 }
7376 if (inst.reloc.type != BFD_RELOC_UNUSED)
7377 {
7378 set_syntax_error (_("relocation not allowed"));
7379 goto failure;
7380 }
7381 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7382 /* addr_off_p */ 1,
7383 /* need_libopcodes_p */ 1,
7384 /* skip_p */ 0);
7385 break;
7386
7387 case AARCH64_OPND_ADDR_UIMM12:
7388 po_misc_or_fail (parse_address (&str, info));
7389 if (info->addr.pcrel || info->addr.offset.is_reg
7390 || !info->addr.preind || info->addr.writeback)
7391 {
7392 set_syntax_error (_("invalid addressing mode"));
7393 goto failure;
7394 }
7395 if (inst.reloc.type == BFD_RELOC_UNUSED)
7396 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7397 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7398 || (inst.reloc.type
7399 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7400 || (inst.reloc.type
7401 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7402 || (inst.reloc.type
7403 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7404 || (inst.reloc.type
7405 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7406 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7407 /* Leave qualifier to be determined by libopcodes. */
7408 break;
7409
7410 case AARCH64_OPND_SIMD_ADDR_POST:
7411 /* [<Xn|SP>], <Xm|#<amount>> */
7412 po_misc_or_fail (parse_address (&str, info));
7413 if (!info->addr.postind || !info->addr.writeback)
7414 {
7415 set_syntax_error (_("invalid addressing mode"));
7416 goto failure;
7417 }
7418 if (!info->addr.offset.is_reg)
7419 {
7420 if (inst.reloc.exp.X_op == O_constant)
7421 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7422 else
7423 {
7424 set_fatal_syntax_error
7425 (_("writeback value must be an immediate constant"));
7426 goto failure;
7427 }
7428 }
7429 /* No qualifier. */
7430 break;
7431
7432 case AARCH64_OPND_SME_SM_ZA:
7433 /* { SM | ZA } */
7434 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7435 {
7436 set_syntax_error (_("unknown or missing PSTATE field name"));
7437 goto failure;
7438 }
7439 info->reg.regno = val;
7440 break;
7441
7442 case AARCH64_OPND_SME_PnT_Wm_imm:
7443 if (!parse_dual_indexed_reg (&str, REG_TYPE_P,
7444 &info->indexed_za, &qualifier, 0))
7445 goto failure;
7446 info->qualifier = qualifier;
7447 break;
7448
7449 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7450 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7451 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7452 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7453 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7454 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7455 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7456 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7457 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7458 case AARCH64_OPND_SVE_ADDR_RI_U6:
7459 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7460 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7461 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7462 /* [X<n>{, #imm, MUL VL}]
7463 [X<n>{, #imm}]
7464 but recognizing SVE registers. */
7465 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7466 &offset_qualifier));
7467 if (base_qualifier != AARCH64_OPND_QLF_X)
7468 {
7469 set_syntax_error (_("invalid addressing mode"));
7470 goto failure;
7471 }
7472 sve_regimm:
7473 if (info->addr.pcrel || info->addr.offset.is_reg
7474 || !info->addr.preind || info->addr.writeback)
7475 {
7476 set_syntax_error (_("invalid addressing mode"));
7477 goto failure;
7478 }
7479 if (inst.reloc.type != BFD_RELOC_UNUSED
7480 || inst.reloc.exp.X_op != O_constant)
7481 {
7482 /* Make sure this has priority over
7483 "invalid addressing mode". */
7484 set_fatal_syntax_error (_("constant offset required"));
7485 goto failure;
7486 }
7487 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7488 break;
7489
7490 case AARCH64_OPND_SVE_ADDR_R:
7491 /* [<Xn|SP>{, <R><m>}]
7492 but recognizing SVE registers. */
7493 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7494 &offset_qualifier));
7495 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7496 {
7497 offset_qualifier = AARCH64_OPND_QLF_X;
7498 info->addr.offset.is_reg = 1;
7499 info->addr.offset.regno = 31;
7500 }
7501 else if (base_qualifier != AARCH64_OPND_QLF_X
7502 || offset_qualifier != AARCH64_OPND_QLF_X)
7503 {
7504 set_syntax_error (_("invalid addressing mode"));
7505 goto failure;
7506 }
7507 goto regoff_addr;
7508
7509 case AARCH64_OPND_SVE_ADDR_RR:
7510 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7511 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7512 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7513 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7514 case AARCH64_OPND_SVE_ADDR_RX:
7515 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7516 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7517 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7518 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7519 but recognizing SVE registers. */
7520 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7521 &offset_qualifier));
7522 if (base_qualifier != AARCH64_OPND_QLF_X
7523 || offset_qualifier != AARCH64_OPND_QLF_X)
7524 {
7525 set_syntax_error (_("invalid addressing mode"));
7526 goto failure;
7527 }
7528 goto regoff_addr;
7529
7530 case AARCH64_OPND_SVE_ADDR_RZ:
7531 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7532 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7533 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7534 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7535 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7536 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7537 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7538 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7539 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7540 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7541 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7542 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7543 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7544 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7545 &offset_qualifier));
7546 if (base_qualifier != AARCH64_OPND_QLF_X
7547 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7548 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7549 {
7550 set_syntax_error (_("invalid addressing mode"));
7551 goto failure;
7552 }
7553 info->qualifier = offset_qualifier;
7554 goto regoff_addr;
7555
7556 case AARCH64_OPND_SVE_ADDR_ZX:
7557 /* [Zn.<T>{, <Xm>}]. */
7558 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7559 &offset_qualifier));
7560 /* Things to check:
7561 base_qualifier either S_S or S_D
7562 offset_qualifier must be X
7563 */
7564 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7565 && base_qualifier != AARCH64_OPND_QLF_S_D)
7566 || offset_qualifier != AARCH64_OPND_QLF_X)
7567 {
7568 set_syntax_error (_("invalid addressing mode"));
7569 goto failure;
7570 }
7571 info->qualifier = base_qualifier;
7572 if (!info->addr.offset.is_reg || info->addr.pcrel
7573 || !info->addr.preind || info->addr.writeback
7574 || info->shifter.operator_present != 0)
7575 {
7576 set_syntax_error (_("invalid addressing mode"));
7577 goto failure;
7578 }
7579 info->shifter.kind = AARCH64_MOD_LSL;
7580 break;
7581
7582
7583 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7584 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7585 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7586 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7587 /* [Z<n>.<T>{, #imm}] */
7588 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7589 &offset_qualifier));
7590 if (base_qualifier != AARCH64_OPND_QLF_S_S
7591 && base_qualifier != AARCH64_OPND_QLF_S_D)
7592 {
7593 set_syntax_error (_("invalid addressing mode"));
7594 goto failure;
7595 }
7596 info->qualifier = base_qualifier;
7597 goto sve_regimm;
7598
7599 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7600 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7601 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7602 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7603 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7604
7605 We don't reject:
7606
7607 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7608
7609 here since we get better error messages by leaving it to
7610 the qualifier checking routines. */
7611 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7612 &offset_qualifier));
7613 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7614 && base_qualifier != AARCH64_OPND_QLF_S_D)
7615 || offset_qualifier != base_qualifier)
7616 {
7617 set_syntax_error (_("invalid addressing mode"));
7618 goto failure;
7619 }
7620 info->qualifier = base_qualifier;
7621 goto regoff_addr;
7622 case AARCH64_OPND_SYSREG:
7623 case AARCH64_OPND_SYSREG128:
7624 {
7625 bool sysreg128_p = operands[i] == AARCH64_OPND_SYSREG128;
7626 uint32_t sysreg_flags;
7627 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7628 &sysreg_flags,
7629 sysreg128_p)) == PARSE_FAIL)
7630 {
7631 set_syntax_error (_("unknown or missing system register name"));
7632 goto failure;
7633 }
7634 inst.base.operands[i].sysreg.value = val;
7635 inst.base.operands[i].sysreg.flags = sysreg_flags;
7636 break;
7637 }
7638
7639 case AARCH64_OPND_PSTATEFIELD:
7640 {
7641 uint32_t sysreg_flags;
7642 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7643 &sysreg_flags, false)) == PARSE_FAIL)
7644 {
7645 set_syntax_error (_("unknown or missing PSTATE field name"));
7646 goto failure;
7647 }
7648 inst.base.operands[i].pstatefield = val;
7649 inst.base.operands[i].sysreg.flags = sysreg_flags;
7650 break;
7651 }
7652
7653 case AARCH64_OPND_SYSREG_IC:
7654 inst.base.operands[i].sysins_op =
7655 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7656 goto sys_reg_ins;
7657
7658 case AARCH64_OPND_SYSREG_DC:
7659 inst.base.operands[i].sysins_op =
7660 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7661 goto sys_reg_ins;
7662
7663 case AARCH64_OPND_SYSREG_AT:
7664 inst.base.operands[i].sysins_op =
7665 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7666 goto sys_reg_ins;
7667
7668 case AARCH64_OPND_SYSREG_SR:
7669 inst.base.operands[i].sysins_op =
7670 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7671 goto sys_reg_ins;
7672
7673 case AARCH64_OPND_SYSREG_TLBI:
7674 case AARCH64_OPND_SYSREG_TLBIP:
7675 inst.base.operands[i].sysins_op =
7676 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7677 sys_reg_ins:
7678 if (inst.base.operands[i].sysins_op == NULL)
7679 {
7680 set_fatal_syntax_error ( _("unknown or missing operation name"));
7681 goto failure;
7682 }
7683 break;
7684
7685 case AARCH64_OPND_BARRIER:
7686 case AARCH64_OPND_BARRIER_ISB:
7687 val = parse_barrier (&str);
7688 if (val != PARSE_FAIL
7689 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7690 {
7691 /* ISB only accepts options name 'sy'. */
7692 set_syntax_error
7693 (_("the specified option is not accepted in ISB"));
7694 /* Turn off backtrack as this optional operand is present. */
7695 backtrack_pos = 0;
7696 goto failure;
7697 }
7698 if (val != PARSE_FAIL
7699 && operands[i] == AARCH64_OPND_BARRIER)
7700 {
7701 /* Regular barriers accept options CRm (C0-C15).
7702 DSB nXS barrier variant accepts values > 15. */
7703 if (val < 0 || val > 15)
7704 {
7705 set_syntax_error (_("the specified option is not accepted in DSB"));
7706 goto failure;
7707 }
7708 }
7709 /* This is an extension to accept a 0..15 immediate. */
7710 if (val == PARSE_FAIL)
7711 po_imm_or_fail (0, 15);
7712 info->barrier = aarch64_barrier_options + val;
7713 break;
7714
7715 case AARCH64_OPND_BARRIER_DSB_NXS:
7716 val = parse_barrier (&str);
7717 if (val != PARSE_FAIL)
7718 {
7719 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7720 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7721 {
7722 set_syntax_error (_("the specified option is not accepted in DSB"));
7723 /* Turn off backtrack as this optional operand is present. */
7724 backtrack_pos = 0;
7725 goto failure;
7726 }
7727 }
7728 else
7729 {
7730 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7731 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7732 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7733 goto failure;
7734 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7735 {
7736 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7737 goto failure;
7738 }
7739 }
7740 /* Option index is encoded as 2-bit value in val<3:2>. */
7741 val = (val >> 2) - 4;
7742 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7743 break;
7744
7745 case AARCH64_OPND_PRFOP:
7746 val = parse_pldop (&str);
7747 /* This is an extension to accept a 0..31 immediate. */
7748 if (val == PARSE_FAIL)
7749 po_imm_or_fail (0, 31);
7750 inst.base.operands[i].prfop = aarch64_prfops + val;
7751 break;
7752
7753 case AARCH64_OPND_RPRFMOP:
7754 po_enum_or_fail (aarch64_rprfmop_array);
7755 info->imm.value = val;
7756 break;
7757
7758 case AARCH64_OPND_BARRIER_PSB:
7759 if (!parse_hint_opt (opcode->name, &str, &(info->hint_option)))
7760 goto failure;
7761 break;
7762
7763 case AARCH64_OPND_SME_ZT0:
7764 po_reg_or_fail (REG_TYPE_ZT0);
7765 break;
7766
7767 case AARCH64_OPND_SME_ZT0_INDEX:
7768 reg = aarch64_reg_parse (&str, REG_TYPE_ZT0, &vectype);
7769 if (!reg || vectype.type != NT_invtype)
7770 goto failure;
7771 if (!(vectype.defined & NTA_HASINDEX))
7772 {
7773 set_syntax_error (_("missing register index"));
7774 goto failure;
7775 }
7776 info->imm.value = vectype.index;
7777 break;
7778
7779 case AARCH64_OPND_SME_ZT0_LIST:
7780 if (*str != '{')
7781 {
7782 set_expected_reglist_error (REG_TYPE_ZT0, parse_reg (&str));
7783 goto failure;
7784 }
7785 str++;
7786 if (!parse_typed_reg (&str, REG_TYPE_ZT0, &vectype, PTR_IN_REGLIST))
7787 goto failure;
7788 if (*str != '}')
7789 {
7790 set_syntax_error (_("expected '}' after ZT0"));
7791 goto failure;
7792 }
7793 str++;
7794 break;
7795
7796 case AARCH64_OPND_SME_PNn3_INDEX1:
7797 case AARCH64_OPND_SME_PNn3_INDEX2:
7798 reg = aarch64_reg_parse (&str, REG_TYPE_PN, &vectype);
7799 if (!reg)
7800 goto failure;
7801 if (!(vectype.defined & NTA_HASINDEX))
7802 {
7803 set_syntax_error (_("missing register index"));
7804 goto failure;
7805 }
7806 info->reglane.regno = reg->number;
7807 info->reglane.index = vectype.index;
7808 if (vectype.type == NT_invtype)
7809 info->qualifier = AARCH64_OPND_QLF_NIL;
7810 else
7811 info->qualifier = vectype_to_qualifier (&vectype);
7812 break;
7813
7814 case AARCH64_OPND_BARRIER_GCSB:
7815 if (!parse_hint_opt (opcode->name, &str, &(info->hint_option)))
7816 goto failure;
7817 break;
7818
7819 case AARCH64_OPND_BTI_TARGET:
7820 if (!parse_hint_opt (opcode->name, &str, &(info->hint_option)))
7821 goto failure;
7822 break;
7823
7824 case AARCH64_OPND_SME_ZAda_2b:
7825 case AARCH64_OPND_SME_ZAda_3b:
7826 reg = parse_reg_with_qual (&str, REG_TYPE_ZAT, &qualifier, 0);
7827 if (!reg)
7828 goto failure;
7829 info->reg.regno = reg->number;
7830 info->qualifier = qualifier;
7831 break;
7832
7833 case AARCH64_OPND_SME_ZA_HV_idx_src:
7834 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
7835 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7836 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
7837 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7838 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr
7839 ? !parse_sme_za_hv_tiles_operand_with_braces (&str,
7840 &info->indexed_za,
7841 &qualifier)
7842 : !parse_dual_indexed_reg (&str, REG_TYPE_ZATHV,
7843 &info->indexed_za, &qualifier, 0))
7844 goto failure;
7845 info->qualifier = qualifier;
7846 break;
7847
7848 case AARCH64_OPND_SME_list_of_64bit_tiles:
7849 val = parse_sme_list_of_64bit_tiles (&str);
7850 if (val == PARSE_FAIL)
7851 goto failure;
7852 info->imm.value = val;
7853 break;
7854
7855 case AARCH64_OPND_SME_ZA_array_off1x4:
7856 case AARCH64_OPND_SME_ZA_array_off2x2:
7857 case AARCH64_OPND_SME_ZA_array_off2x4:
7858 case AARCH64_OPND_SME_ZA_array_off3_0:
7859 case AARCH64_OPND_SME_ZA_array_off3_5:
7860 case AARCH64_OPND_SME_ZA_array_off3x2:
7861 case AARCH64_OPND_SME_ZA_array_off4:
7862 if (!parse_dual_indexed_reg (&str, REG_TYPE_ZA,
7863 &info->indexed_za, &qualifier, 0))
7864 goto failure;
7865 info->qualifier = qualifier;
7866 break;
7867
7868 case AARCH64_OPND_SME_VLxN_10:
7869 case AARCH64_OPND_SME_VLxN_13:
7870 po_strict_enum_or_fail (aarch64_sme_vlxn_array);
7871 info->imm.value = val;
7872 break;
7873
7874 case AARCH64_OPND_MOPS_ADDR_Rd:
7875 case AARCH64_OPND_MOPS_ADDR_Rs:
7876 po_char_or_fail ('[');
7877 if (!parse_x0_to_x30 (&str, info))
7878 goto failure;
7879 po_char_or_fail (']');
7880 po_char_or_fail ('!');
7881 break;
7882
7883 case AARCH64_OPND_MOPS_WB_Rn:
7884 if (!parse_x0_to_x30 (&str, info))
7885 goto failure;
7886 po_char_or_fail ('!');
7887 break;
7888
7889 case AARCH64_OPND_LSE128_Rt:
7890 case AARCH64_OPND_LSE128_Rt2:
7891 po_int_fp_reg_or_fail (REG_TYPE_R_64);
7892 break;
7893
7894 default:
7895 as_fatal (_("unhandled operand code %d"), operands[i]);
7896 }
7897
7898 /* If we get here, this operand was successfully parsed. */
7899 inst.base.operands[i].present = 1;
7900
7901 /* As instructions can have multiple optional operands, it is imporant to
7902 reset the backtrack_pos variable once we finish processing an operand
7903 successfully. */
7904 backtrack_pos = 0;
7905
7906 continue;
7907
7908 failure:
7909 /* The parse routine should already have set the error, but in case
7910 not, set a default one here. */
7911 if (! error_p ())
7912 set_default_error ();
7913
7914 if (! backtrack_pos)
7915 goto parse_operands_return;
7916
7917 {
7918 /* We reach here because this operand is marked as optional, and
7919 either no operand was supplied or the operand was supplied but it
7920 was syntactically incorrect. In the latter case we report an
7921 error. In the former case we perform a few more checks before
7922 dropping through to the code to insert the default operand. */
7923
7924 char *tmp = backtrack_pos;
7925 char endchar = END_OF_INSN;
7926
7927 skip_past_char (&tmp, ',');
7928
7929 if (*tmp != endchar)
7930 /* The user has supplied an operand in the wrong format. */
7931 goto parse_operands_return;
7932
7933 /* Make sure there is not a comma before the optional operand.
7934 For example the fifth operand of 'sys' is optional:
7935
7936 sys #0,c0,c0,#0, <--- wrong
7937 sys #0,c0,c0,#0 <--- correct. */
7938 if (comma_skipped_p && i && endchar == END_OF_INSN)
7939 {
7940 set_fatal_syntax_error
7941 (_("unexpected comma before the omitted optional operand"));
7942 goto parse_operands_return;
7943 }
7944 }
7945
7946 /* Reaching here means we are dealing with an optional operand that is
7947 omitted from the assembly line. */
7948 gas_assert (optional_operand_p (opcode, i));
7949 info->present = 0;
7950 process_omitted_operand (operands[i], opcode, i, info);
7951
7952 /* Try again, skipping the optional operand at backtrack_pos. */
7953 str = backtrack_pos;
7954 backtrack_pos = 0;
7955
7956 /* Clear any error record after the omitted optional operand has been
7957 successfully handled. */
7958 clear_error ();
7959 }
7960
7961 /* Check if we have parsed all the operands. */
7962 if (*str != '\0' && ! error_p ())
7963 {
7964 /* Set I to the index of the last present operand; this is
7965 for the purpose of diagnostics. */
7966 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7967 ;
7968 set_fatal_syntax_error
7969 (_("unexpected characters following instruction"));
7970 }
7971
7972 parse_operands_return:
7973
7974 if (error_p ())
7975 {
7976 inst.parsing_error.index = i;
7977 DEBUG_TRACE ("parsing FAIL: %s - %s",
7978 operand_mismatch_kind_names[inst.parsing_error.kind],
7979 inst.parsing_error.error);
7980 /* Record the operand error properly; this is useful when there
7981 are multiple instruction templates for a mnemonic name, so that
7982 later on, we can select the error that most closely describes
7983 the problem. */
7984 record_operand_error_info (opcode, &inst.parsing_error);
7985 return false;
7986 }
7987 else
7988 {
7989 DEBUG_TRACE ("parsing SUCCESS");
7990 return true;
7991 }
7992 }
7993
/* It does some fix-up to provide some programmer friendly feature while
   keeping the libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.

   On failure the specific problem has already been recorded via
   record_operand_error* so the caller can report it later.  */

static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  /* Dispatch on the instruction class; only a few classes need any
     syntax massaging.  */
  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register restricts the testable bit number to 0..31.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  /* Canonicalize to the X form accepted by libopcodes.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW always loads a 32-bit literal regardless of the
	     destination qualifier.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
8101
/* Check for loads and stores that will cause unpredictable behavior.

   INSTR is the fully parsed instruction; STR is the original assembly
   line, used verbatim in the warning messages.  Only warnings are
   issued -- the instruction is still assembled.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 Bit 22 of the encoding is set for the load forms here.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
          /* Bit 22 set selects the load-exclusive forms; bit 21 set
	     selects the pair forms.  */
          if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
8197
8198 static void
8199 force_automatic_sequence_close (void)
8200 {
8201 struct aarch64_segment_info_type *tc_seg_info;
8202
8203 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
8204 if (tc_seg_info->insn_sequence.instr)
8205 {
8206 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
8207 _("previous `%s' sequence has not been closed"),
8208 tc_seg_info->insn_sequence.instr->opcode->name);
8209 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
8210 }
8211 }
8212
8213 /* A wrapper function to interface with libopcodes on encoding and
8214 record the error message if there is any.
8215
8216 Return TRUE on success; otherwise return FALSE. */
8217
8218 static bool
8219 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
8220 aarch64_insn *code)
8221 {
8222 aarch64_operand_error error_info;
8223 memset (&error_info, '\0', sizeof (error_info));
8224 error_info.kind = AARCH64_OPDE_NIL;
8225 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
8226 && !error_info.non_fatal)
8227 return true;
8228
8229 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
8230 record_operand_error_info (opcode, &error_info);
8231 return error_info.non_fatal;
8232 }
8233
#ifdef DEBUG_AARCH64
/* Debug helper: print every operand of OPCODE, using the operand's
   name when it has one and its description otherwise.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  for (int idx = 0; opcode->operands[idx] != AARCH64_OPND_NIL; idx++)
    {
      const char *name = aarch64_get_operand_name (opcode->operands[idx]);
      aarch64_verbose ("\t\t opnd%d: %s", idx,
		       name[0] != '\0'
		       ? name
		       : aarch64_get_operand_desc (opcode->operands[idx]));
    }
}
#endif /* DEBUG_AARCH64 */
8249
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.

   Parsing/encoding errors are collected per opcode template and only
   reported once every template with the same mnemonic has failed.  */

void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  Remember the first '.' so a condition
     suffix (e.g. "b.eq") can be located.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* A ".req"-style directive takes priority over instruction lookup
     when there is no '.' in the candidate mnemonic.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* inst.cond may have been set before this call (presumably during
     mnemonic lookup); preserve it across the reset of the scratch
     instruction.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!aarch64_cpu_supports_inst_p (cpu_variant, inst_base))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; retry with the next one after resetting
	 the scratch instruction (again keeping the condition code).  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8404
/* Various frobbings of labels and their addresses.  */

/* Called at the start of every input line: forget the most recently
   seen label so md_assemble only re-anchors labels defined on the
   current line.  */

void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
8412
/* Record SYM as the most recently seen label (so md_assemble can
   re-anchor it to the instruction's frag) and emit DWARF line-number
   information for it.  */

void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8420
/* Section-change hook.  SEC is unused; the current segment's state is
   consulted directly.  */

void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
8427
8428 int
8429 aarch64_data_in_code (void)
8430 {
8431 if (startswith (input_line_pointer + 1, "data:"))
8432 {
8433 *input_line_pointer = '/';
8434 input_line_pointer += 5;
8435 *input_line_pointer = 0;
8436 return 1;
8437 }
8438
8439 return 0;
8440 }
8441
/* Strip a trailing "/data" suffix from NAME in place, provided NAME is
   longer than the suffix itself.  Return NAME (modified or not).  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8452 \f
8453 /* Table of all register names defined by default. The user can
8454 define additional names with .req. Note that all register names
8455 should appear in both upper and lowercase variants. Some registers
8456 also have mixed-case names. */
8457
8458 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8459 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8460 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8461 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8462 #define REGSET16(p,t) \
8463 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8464 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8465 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8466 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8467 #define REGSET16S(p,s,t) \
8468 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8469 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8470 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8471 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8472 #define REGSET31(p,t) \
8473 REGSET16(p, t), \
8474 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8475 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8476 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8477 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8478 #define REGSET(p,t) \
8479 REGSET31(p,t), REGNUM(p,31,t)
8480
/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  Only numbers 0-30 are defined here (REGSET31);
     number 31 is context-dependent and is covered by the SP/WSP and
     XZR/WZR entries below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  /* Procedure-call-standard aliases for selected 64-bit registers:
     ip0/ip1 (x16/x17), the frame pointer fp (x29) and the link
     register lr (x30).  */
  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),

  /* Stack pointer: register number 31 in SP-allowed contexts.  */
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  /* Zero register: register number 31 in ZR-allowed contexts.  */
  REGDEF (wzr, 31, ZR_32), REGDEF (WZR, 31, ZR_32),
  REGDEF (xzr, 31, ZR_64), REGDEF (XZR, 31, ZR_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, V), REGSET (V, V),

  /* SVE vector registers.  */
  REGSET (z, Z), REGSET (Z, Z),

  /* SVE predicate(-as-mask) registers (P0-P15, hence REGSET16).  */
  REGSET16 (p, P), REGSET16 (P, P),

  /* SVE predicate-as-counter registers.  */
  REGSET16 (pn, PN), REGSET16 (PN, PN),

  /* SME ZA.  We model this as a register because it acts syntactically
     like ZA0H, supporting qualifier suffixes and indexing.  */
  REGDEF (za, 0, ZA), REGDEF (ZA, 0, ZA),

  /* SME ZA tile registers.  */
  REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),

  /* SME ZA tile registers (horizontal slice).  */
  REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),

  /* SME ZA tile registers (vertical slice).  */
  REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV),

  /* SME2 ZT0.  */
  REGDEF (zt0, 0, ZT0), REGDEF (ZT0, 0, ZT0)
};
8540
8541 #undef REGDEF
8542 #undef REGDEF_ALIAS
8543 #undef REGNUM
8544 #undef REGSET16
8545 #undef REGSET31
8546 #undef REGSET
8547
/* Helper macros for building the table of NZCV condition-flag operand
   spellings below.  Each flag letter expands to 1 when upper case (flag
   set) and 0 when lower case (flag clear); B packs the four flags into
   a 4-bit immediate in N:Z:C:V order, most significant bit first.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All sixteen case-distinguished spellings of the NZCV operand, each
   mapped to its 4-bit flag mask.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

/* The single-letter macros are far too short-named to stay in scope
   beyond the table they build.  */
#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8585 \f
8586 /* MD interface: bits in the object file. */
8587
8588 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8589 for use in the a.out file, and stores them in the array pointed to by buf.
8590 This knows about the endian-ness of the target machine and does
8591 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8592 2 (short) and 4 (long) Floating numbers are put out as a series of
8593 LITTLENUMS (shorts, here at least). */
8594
8595 void
8596 md_number_to_chars (char *buf, valueT val, int n)
8597 {
8598 if (target_big_endian)
8599 number_to_chars_bigendian (buf, val, n);
8600 else
8601 number_to_chars_littleendian (buf, val, n);
8602 }
8603
8604 /* MD interface: Sections. */
8605
8606 /* Estimate the size of a frag before relaxing. Assume everything fits in
8607 4 bytes. */
8608
8609 int
8610 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
8611 {
8612 fragp->fr_var = 4;
8613 return 4;
8614 }
8615
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No target-specific rounding is required; the size is returned
     unchanged.  */
  return size;
}
8623
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  /* Only code-alignment frags are handled here.  */
  if (fragP->fr_type != rs_align_code)
    return;

  /* Total amount of padding this frag must supply, and where in the
     frag's literal buffer it starts.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Number of bytes needed to reach a 4-byte instruction boundary;
     these become part of the fixed (fr_fix) portion.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      /* Zero-fill the misaligned head; no instruction can start here.  */
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating (fr_var) portion is a single NOP pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8681
8682 /* Perform target specific initialisation of a frag.
8683 Note - despite the name this initialisation is not done when the frag
8684 is created, but only when its type is assigned. A frag can be created
8685 and used a long time before its type is set, so beware of assuming that
8686 this initialisation is performed first. */
8687
8688 #ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets; mapping symbols are only
     maintained by the ELF implementation.  */
}
8694
8695 #else /* OBJ_ELF is defined. */
8696 void
8697 aarch64_init_frag (fragS * fragP, int max_chars)
8698 {
8699 /* Record a mapping symbol for alignment frags. We will delete this
8700 later if the alignment ends up empty. */
8701 if (!fragP->tc_frag_data.recorded)
8702 fragP->tc_frag_data.recorded = 1;
8703
8704 /* PR 21809: Do not set a mapping state for debug sections
8705 - it just confuses other tools. */
8706 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
8707 return;
8708
8709 switch (fragP->fr_type)
8710 {
8711 case rs_align_test:
8712 case rs_fill:
8713 mapping_state_2 (MAP_DATA, max_chars);
8714 break;
8715 case rs_align:
8716 /* PR 20364: We can get alignment frags in code sections,
8717 so do not just assume that we should use the MAP_DATA state. */
8718 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
8719 break;
8720 case rs_align_code:
8721 mapping_state_2 (MAP_INSN, max_chars);
8722 break;
8723 default:
8724 break;
8725 }
8726 }
8727
8728 /* Whether SFrame stack trace info is supported. */
8729
8730 bool
8731 aarch64_support_sframe_p (void)
8732 {
8733 /* At this time, SFrame is supported for aarch64 only. */
8734 return (aarch64_abi == AARCH64_ABI_LP64);
8735 }
8736
/* Specify if RA tracking is needed.  */

bool
aarch64_sframe_ra_tracking_p (void)
{
  /* The return address is always tracked on AArch64.  */
  return true;
}
8744
/* Specify the fixed offset to recover RA from CFA.
   (useful only when RA tracking is not needed).  */

offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  /* RA tracking is always enabled here (see aarch64_sframe_ra_tracking_p),
     so no fixed offset applies.  */
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8753
8754 /* Get the abi/arch indentifier for SFrame. */
8755
8756 unsigned char
8757 aarch64_sframe_get_abi_arch (void)
8758 {
8759 unsigned char sframe_abi_arch = 0;
8760
8761 if (aarch64_support_sframe_p ())
8762 {
8763 sframe_abi_arch = target_big_endian
8764 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8765 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8766 }
8767
8768 return sframe_abi_arch;
8769 }
8770
8771 #endif /* OBJ_ELF */
8772 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* At function entry the CFA is defined as SP + 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8780
8781 /* Convert REGNAME to a DWARF-2 register number. */
8782
8783 int
8784 tc_aarch64_regname_to_dw2regnum (char *regname)
8785 {
8786 const reg_entry *reg = parse_reg (&regname);
8787 if (reg == NULL)
8788 return -1;
8789
8790 switch (reg->type)
8791 {
8792 case REG_TYPE_SP_32:
8793 case REG_TYPE_SP_64:
8794 case REG_TYPE_R_32:
8795 case REG_TYPE_R_64:
8796 return reg->number;
8797
8798 case REG_TYPE_FP_B:
8799 case REG_TYPE_FP_H:
8800 case REG_TYPE_FP_S:
8801 case REG_TYPE_FP_D:
8802 case REG_TYPE_FP_Q:
8803 return reg->number + 64;
8804
8805 default:
8806 break;
8807 }
8808 return -1;
8809 }
8810
8811 /* Implement DWARF2_ADDR_SIZE. */
8812
8813 int
8814 aarch64_dwarf2_addr_size (void)
8815 {
8816 if (ilp32_p)
8817 return 4;
8818 else if (llp64_p)
8819 return 8;
8820 return bfd_arch_bits_per_address (stdoutput) / 8;
8821 }
8822
8823 /* MD interface: Symbol and relocation handling. */
8824
8825 /* Return the address within the segment that a PC-relative fixup is
8826 relative to. For AArch64 PC-relative fixups applied to instructions
8827 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8828
8829 long
8830 md_pcrel_from_section (fixS * fixP, segT seg)
8831 {
8832 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8833
8834 /* If this is pc-relative and we are going to emit a relocation
8835 then we just want to put out any pipeline compensation that the linker
8836 will need. Otherwise we want to use the calculated base. */
8837 if (fixP->fx_pcrel
8838 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8839 || aarch64_force_relocation (fixP)))
8840 base = 0;
8841
8842 /* AArch64 should be consistent for all pc-relative relocations. */
8843 return base + AARCH64_PCREL_OFFSET;
8844 }
8845
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character filter before the full string comparison.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      /* Create the GOT symbol lazily, on first reference.  */
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
8871
8872 /* Return non-zero if the indicated VALUE has overflowed the maximum
8873 range expressible by a unsigned number with the indicated number of
8874 BITS. */
8875
8876 static bool
8877 unsigned_overflow (valueT value, unsigned bits)
8878 {
8879 valueT lim;
8880 if (bits >= sizeof (valueT) * 8)
8881 return false;
8882 lim = (valueT) 1 << bits;
8883 return (value >= lim);
8884 }
8885
8886
8887 /* Return non-zero if the indicated VALUE has overflowed the maximum
8888 range expressible by an signed number with the indicated number of
8889 BITS. */
8890
8891 static bool
8892 signed_overflow (offsetT value, unsigned bits)
8893 {
8894 offsetT lim;
8895 if (bits >= sizeof (offsetT) * 8)
8896 return false;
8897 lim = (offsetT) 1 << (bits - 1);
8898 return (value < -lim || value >= lim);
8899 }
8900
/* Given an instruction in *INSTR, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled LDUR/STUR twin;
     OP_NIL marks opcodes with no such twin.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; a failure here means the
     offset does not fit the signed 9-bit form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8963
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to load
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  /* Plug the now-resolved immediate into the operand and un-skip it.  */
  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias first: preferred for reversability.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the inverted-immediate form (OP_MOV_IMM_WIDEN — presumably
	 the MOVN alias, per the MOVZ/MOVN/ORR contract above).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias (bitmask immediate) last.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* No single MOVZ/MOVN/ORR can materialise VALUE.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
9024
/* An instruction operand which is immediate related may have a symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   The instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate field; only EXCEPTION goes through the
	 dedicated encoder.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with an add/sub shifted reg encoding
		   3  322|2222|2 2 2 21111 111111
		   1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000  sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
	 2b000000  sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
	 4b000000  sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
	 6b000000  sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
	 ->
		   3  322|2222|2 2 221111111111
		   1  098|7654|3 2 109876543210 98765 43210
	 11000000  sf 001|0001|shift    imm12      Rn    Rd    ADD
	 31000000  sf 011|0001|shift    imm12      Rn    Rd    ADDS
	 51000000  sf 101|0001|shift    imm12      Rn    Rd    SUB
	 71000000  sf 111|0001|shift    imm12      Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode via the full encoder since the
	 valid values form an irregular set.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to load a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  If the scaled unsigned form fails, fall back to
	 the programmer-friendly unscaled LDUR/STUR form.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
9203
9204 /* Apply a fixup (fixP) to segment data, once it has been determined
9205 by our caller that we have all the info we need to fix it up.
9206
9207 Parameter valP is the pointer to the value of the bits. */
9208
9209 void
9210 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
9211 {
9212 offsetT value = *valP;
9213 uint32_t insn;
9214 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
9215 int scale;
9216 unsigned flags = fixP->fx_addnumber;
9217
9218 DEBUG_TRACE ("\n\n");
9219 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
9220 DEBUG_TRACE ("Enter md_apply_fix");
9221
9222 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
9223
9224 /* Note whether this will delete the relocation. */
9225
9226 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
9227 && aarch64_force_reloc (fixP->fx_r_type) <= 0)
9228 fixP->fx_done = 1;
9229
9230 /* Process the relocations. */
9231 switch (fixP->fx_r_type)
9232 {
9233 case BFD_RELOC_NONE:
9234 /* This will need to go in the object file. */
9235 fixP->fx_done = 0;
9236 break;
9237
9238 case BFD_RELOC_8:
9239 case BFD_RELOC_8_PCREL:
9240 if (fixP->fx_done || !seg->use_rela_p)
9241 md_number_to_chars (buf, value, 1);
9242 break;
9243
9244 case BFD_RELOC_16:
9245 case BFD_RELOC_16_PCREL:
9246 if (fixP->fx_done || !seg->use_rela_p)
9247 md_number_to_chars (buf, value, 2);
9248 break;
9249
9250 case BFD_RELOC_32:
9251 case BFD_RELOC_32_PCREL:
9252 if (fixP->fx_done || !seg->use_rela_p)
9253 md_number_to_chars (buf, value, 4);
9254 break;
9255
9256 case BFD_RELOC_64:
9257 case BFD_RELOC_64_PCREL:
9258 if (fixP->fx_done || !seg->use_rela_p)
9259 md_number_to_chars (buf, value, 8);
9260 break;
9261
9262 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
9263 /* We claim that these fixups have been processed here, even if
9264 in fact we generate an error because we do not have a reloc
9265 for them, so tc_gen_reloc() will reject them. */
9266 fixP->fx_done = 1;
9267 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
9268 {
9269 as_bad_where (fixP->fx_file, fixP->fx_line,
9270 _("undefined symbol %s used as an immediate value"),
9271 S_GET_NAME (fixP->fx_addsy));
9272 goto apply_fix_return;
9273 }
9274 fix_insn (fixP, flags, value);
9275 break;
9276
9277 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
9278 if (fixP->fx_done || !seg->use_rela_p)
9279 {
9280 if (value & 3)
9281 as_bad_where (fixP->fx_file, fixP->fx_line,
9282 _("pc-relative load offset not word aligned"));
9283 if (signed_overflow (value, 21))
9284 as_bad_where (fixP->fx_file, fixP->fx_line,
9285 _("pc-relative load offset out of range"));
9286 insn = get_aarch64_insn (buf);
9287 insn |= encode_ld_lit_ofs_19 (value >> 2);
9288 put_aarch64_insn (buf, insn);
9289 }
9290 break;
9291
9292 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
9293 if (fixP->fx_done || !seg->use_rela_p)
9294 {
9295 if (signed_overflow (value, 21))
9296 as_bad_where (fixP->fx_file, fixP->fx_line,
9297 _("pc-relative address offset out of range"));
9298 insn = get_aarch64_insn (buf);
9299 insn |= encode_adr_imm (value);
9300 put_aarch64_insn (buf, insn);
9301 }
9302 break;
9303
9304 case BFD_RELOC_AARCH64_BRANCH19:
9305 if (fixP->fx_done || !seg->use_rela_p)
9306 {
9307 if (value & 3)
9308 as_bad_where (fixP->fx_file, fixP->fx_line,
9309 _("conditional branch target not word aligned"));
9310 if (signed_overflow (value, 21))
9311 as_bad_where (fixP->fx_file, fixP->fx_line,
9312 _("conditional branch out of range"));
9313 insn = get_aarch64_insn (buf);
9314 insn |= encode_cond_branch_ofs_19 (value >> 2);
9315 put_aarch64_insn (buf, insn);
9316 }
9317 break;
9318
9319 case BFD_RELOC_AARCH64_TSTBR14:
9320 if (fixP->fx_done || !seg->use_rela_p)
9321 {
9322 if (value & 3)
9323 as_bad_where (fixP->fx_file, fixP->fx_line,
9324 _("conditional branch target not word aligned"));
9325 if (signed_overflow (value, 16))
9326 as_bad_where (fixP->fx_file, fixP->fx_line,
9327 _("conditional branch out of range"));
9328 insn = get_aarch64_insn (buf);
9329 insn |= encode_tst_branch_ofs_14 (value >> 2);
9330 put_aarch64_insn (buf, insn);
9331 }
9332 break;
9333
9334 case BFD_RELOC_AARCH64_CALL26:
9335 case BFD_RELOC_AARCH64_JUMP26:
9336 if (fixP->fx_done || !seg->use_rela_p)
9337 {
9338 if (value & 3)
9339 as_bad_where (fixP->fx_file, fixP->fx_line,
9340 _("branch target not word aligned"));
9341 if (signed_overflow (value, 28))
9342 as_bad_where (fixP->fx_file, fixP->fx_line,
9343 _("branch out of range"));
9344 insn = get_aarch64_insn (buf);
9345 insn |= encode_branch_ofs_26 (value >> 2);
9346 put_aarch64_insn (buf, insn);
9347 }
9348 break;
9349
9350 case BFD_RELOC_AARCH64_MOVW_G0:
9351 case BFD_RELOC_AARCH64_MOVW_G0_NC:
9352 case BFD_RELOC_AARCH64_MOVW_G0_S:
9353 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
9354 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
9355 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
9356 scale = 0;
9357 goto movw_common;
9358 case BFD_RELOC_AARCH64_MOVW_G1:
9359 case BFD_RELOC_AARCH64_MOVW_G1_NC:
9360 case BFD_RELOC_AARCH64_MOVW_G1_S:
9361 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9362 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
9363 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
9364 scale = 16;
9365 goto movw_common;
9366 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
9367 scale = 0;
9368 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9369 /* Should always be exported to object file, see
9370 aarch64_force_relocation(). */
9371 gas_assert (!fixP->fx_done);
9372 gas_assert (seg->use_rela_p);
9373 goto movw_common;
9374 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9375 scale = 16;
9376 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9377 /* Should always be exported to object file, see
9378 aarch64_force_relocation(). */
9379 gas_assert (!fixP->fx_done);
9380 gas_assert (seg->use_rela_p);
9381 goto movw_common;
9382 case BFD_RELOC_AARCH64_MOVW_G2:
9383 case BFD_RELOC_AARCH64_MOVW_G2_NC:
9384 case BFD_RELOC_AARCH64_MOVW_G2_S:
9385 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
9386 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
9387 scale = 32;
9388 goto movw_common;
9389 case BFD_RELOC_AARCH64_MOVW_G3:
9390 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
9391 scale = 48;
9392 movw_common:
9393 if (fixP->fx_done || !seg->use_rela_p)
9394 {
9395 insn = get_aarch64_insn (buf);
9396
9397 if (!fixP->fx_done)
9398 {
9399 /* REL signed addend must fit in 16 bits */
9400 if (signed_overflow (value, 16))
9401 as_bad_where (fixP->fx_file, fixP->fx_line,
9402 _("offset out of range"));
9403 }
9404 else
9405 {
9406 /* Check for overflow and scale. */
9407 switch (fixP->fx_r_type)
9408 {
9409 case BFD_RELOC_AARCH64_MOVW_G0:
9410 case BFD_RELOC_AARCH64_MOVW_G1:
9411 case BFD_RELOC_AARCH64_MOVW_G2:
9412 case BFD_RELOC_AARCH64_MOVW_G3:
9413 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9414 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9415 if (unsigned_overflow (value, scale + 16))
9416 as_bad_where (fixP->fx_file, fixP->fx_line,
9417 _("unsigned value out of range"));
9418 break;
9419 case BFD_RELOC_AARCH64_MOVW_G0_S:
9420 case BFD_RELOC_AARCH64_MOVW_G1_S:
9421 case BFD_RELOC_AARCH64_MOVW_G2_S:
9422 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
9423 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
9424 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
9425 /* NOTE: We can only come here with movz or movn. */
9426 if (signed_overflow (value, scale + 16))
9427 as_bad_where (fixP->fx_file, fixP->fx_line,
9428 _("signed value out of range"));
9429 if (value < 0)
9430 {
9431 /* Force use of MOVN. */
9432 value = ~value;
9433 insn = reencode_movzn_to_movn (insn);
9434 }
9435 else
9436 {
9437 /* Force use of MOVZ. */
9438 insn = reencode_movzn_to_movz (insn);
9439 }
9440 break;
9441 default:
9442 /* Unchecked relocations. */
9443 break;
9444 }
9445 value >>= scale;
9446 }
9447
9448 /* Insert value into MOVN/MOVZ/MOVK instruction. */
9449 insn |= encode_movw_imm (value & 0xffff);
9450
9451 put_aarch64_insn (buf, insn);
9452 }
9453 break;
9454
9455 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
9456 fixP->fx_r_type = (ilp32_p
9457 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
9458 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
9459 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9460 /* Should always be exported to object file, see
9461 aarch64_force_relocation(). */
9462 gas_assert (!fixP->fx_done);
9463 gas_assert (seg->use_rela_p);
9464 break;
9465
9466 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
9467 fixP->fx_r_type = (ilp32_p
9468 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
9469 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
9470 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9471 /* Should always be exported to object file, see
9472 aarch64_force_relocation(). */
9473 gas_assert (!fixP->fx_done);
9474 gas_assert (seg->use_rela_p);
9475 break;
9476
9477 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
9478 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
9479 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
9480 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
9481 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
9482 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
9483 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
9484 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
9485 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
9486 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
9487 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
9488 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
9489 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
9490 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
9491 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
9492 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
9493 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
9494 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
9495 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
9496 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
9497 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
9498 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
9499 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
9500 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
9501 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
9502 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
9503 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
9504 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
9505 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
9506 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
9507 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
9508 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
9509 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
9510 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
9511 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
9512 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
9513 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
9514 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
9515 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
9516 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
9517 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
9518 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
9519 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
9520 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
9521 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
9522 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
9523 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
9524 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
9525 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
9526 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
9527 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
9528 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
9529 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9530 /* Should always be exported to object file, see
9531 aarch64_force_relocation(). */
9532 gas_assert (!fixP->fx_done);
9533 gas_assert (seg->use_rela_p);
9534 break;
9535
9536 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
9537 /* Should always be exported to object file, see
9538 aarch64_force_relocation(). */
9539 fixP->fx_r_type = (ilp32_p
9540 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
9541 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
9542 gas_assert (!fixP->fx_done);
9543 gas_assert (seg->use_rela_p);
9544 break;
9545
9546 case BFD_RELOC_AARCH64_ADD_LO12:
9547 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
9548 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
9549 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
9550 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
9551 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
9552 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
9553 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
9554 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
9555 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
9556 case BFD_RELOC_AARCH64_LDST128_LO12:
9557 case BFD_RELOC_AARCH64_LDST16_LO12:
9558 case BFD_RELOC_AARCH64_LDST32_LO12:
9559 case BFD_RELOC_AARCH64_LDST64_LO12:
9560 case BFD_RELOC_AARCH64_LDST8_LO12:
9561 /* Should always be exported to object file, see
9562 aarch64_force_relocation(). */
9563 gas_assert (!fixP->fx_done);
9564 gas_assert (seg->use_rela_p);
9565 break;
9566
9567 case BFD_RELOC_AARCH64_TLSDESC_ADD:
9568 case BFD_RELOC_AARCH64_TLSDESC_CALL:
9569 case BFD_RELOC_AARCH64_TLSDESC_LDR:
9570 break;
9571
9572 case BFD_RELOC_UNUSED:
9573 /* An error will already have been reported. */
9574 break;
9575
9576 case BFD_RELOC_RVA:
9577 case BFD_RELOC_32_SECREL:
9578 case BFD_RELOC_16_SECIDX:
9579 break;
9580
9581 default:
9582 as_bad_where (fixP->fx_file, fixP->fx_line,
9583 _("unexpected %s fixup"),
9584 bfd_get_reloc_code_name (fixP->fx_r_type));
9585 break;
9586 }
9587
9588 apply_fix_return:
9589 /* Free the allocated the struct aarch64_inst.
9590 N.B. currently there are very limited number of fix-up types actually use
9591 this field, so the impact on the performance should be minimal . */
9592 free (fixP->tc_fix_data.inst);
9593
9594 return;
9595 }
9596
9597 /* Translate internal representation of relocation info to BFD target
9598 format. */
9599
9600 arelent *
9601 tc_gen_reloc (asection * section, fixS * fixp)
9602 {
9603 arelent *reloc;
9604 bfd_reloc_code_real_type code;
9605
9606 reloc = XNEW (arelent);
9607
9608 reloc->sym_ptr_ptr = XNEW (asymbol *);
9609 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9610 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9611
9612 if (fixp->fx_pcrel)
9613 {
9614 if (section->use_rela_p)
9615 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9616 else
9617 fixp->fx_offset = reloc->address;
9618 }
9619 reloc->addend = fixp->fx_offset;
9620
9621 code = fixp->fx_r_type;
9622 switch (code)
9623 {
9624 case BFD_RELOC_16:
9625 if (fixp->fx_pcrel)
9626 code = BFD_RELOC_16_PCREL;
9627 break;
9628
9629 case BFD_RELOC_32:
9630 if (fixp->fx_pcrel)
9631 code = BFD_RELOC_32_PCREL;
9632 break;
9633
9634 case BFD_RELOC_64:
9635 if (fixp->fx_pcrel)
9636 code = BFD_RELOC_64_PCREL;
9637 break;
9638
9639 default:
9640 break;
9641 }
9642
9643 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9644 if (reloc->howto == NULL)
9645 {
9646 as_bad_where (fixp->fx_file, fixp->fx_line,
9647 _
9648 ("cannot represent %s relocation in this object file format"),
9649 bfd_get_reloc_code_name (code));
9650 return NULL;
9651 }
9652
9653 return reloc;
9654 }
9655
9656 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9657
9658 void
9659 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9660 {
9661 bfd_reloc_code_real_type type;
9662 int pcrel = 0;
9663
9664 #ifdef TE_PE
9665 if (exp->X_op == O_secrel)
9666 {
9667 exp->X_op = O_symbol;
9668 type = BFD_RELOC_32_SECREL;
9669 }
9670 else if (exp->X_op == O_secidx)
9671 {
9672 exp->X_op = O_symbol;
9673 type = BFD_RELOC_16_SECIDX;
9674 }
9675 else
9676 {
9677 #endif
9678 /* Pick a reloc.
9679 FIXME: @@ Should look at CPU word size. */
9680 switch (size)
9681 {
9682 case 1:
9683 type = BFD_RELOC_8;
9684 break;
9685 case 2:
9686 type = BFD_RELOC_16;
9687 break;
9688 case 4:
9689 type = BFD_RELOC_32;
9690 break;
9691 case 8:
9692 type = BFD_RELOC_64;
9693 break;
9694 default:
9695 as_bad (_("cannot do %u-byte relocation"), size);
9696 type = BFD_RELOC_UNUSED;
9697 break;
9698 }
9699 #ifdef TE_PE
9700 }
9701 #endif
9702
9703 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9704 }
9705
9706 /* Implement md_after_parse_args. This is the earliest time we need to decide
9707 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9708
9709 void
9710 aarch64_after_parse_args (void)
9711 {
9712 if (aarch64_abi != AARCH64_ABI_NONE)
9713 return;
9714
9715 #ifdef OBJ_ELF
9716 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9717 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9718 aarch64_abi = AARCH64_ABI_ILP32;
9719 else
9720 aarch64_abi = AARCH64_ABI_LP64;
9721 #else
9722 aarch64_abi = AARCH64_ABI_LLP64;
9723 #endif
9724 }
9725
9726 #ifdef OBJ_ELF
9727 const char *
9728 elf64_aarch64_target_format (void)
9729 {
9730 #ifdef TE_CLOUDABI
9731 /* FIXME: What to do for ilp32_p ? */
9732 if (target_big_endian)
9733 return "elf64-bigaarch64-cloudabi";
9734 else
9735 return "elf64-littleaarch64-cloudabi";
9736 #else
9737 if (target_big_endian)
9738 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9739 else
9740 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9741 #endif
9742 }
9743
/* Symbol-frobbing hook; defers entirely to the generic ELF handling.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9749 #elif defined OBJ_COFF
/* Return the BFD target name used for PE/COFF AArch64 output (always
   little-endian).  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9755 #endif
9756
9757 /* MD interface: Finalization. */
9758
9759 /* A good place to do this, although this was probably not intended
9760 for this kind of use. We need to dump the literal pool before
9761 references are made to a null symbol pointer. */
9762
9763 void
9764 aarch64_cleanup (void)
9765 {
9766 literal_pool *pool;
9767
9768 for (pool = list_of_pools; pool; pool = pool->next)
9769 {
9770 /* Put it at the end of the relevant section. */
9771 subseg_set (pool->section, pool->sub_section);
9772 s_ltorg (0);
9773 }
9774 }
9775
9776 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections without a frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      /* Only a mapping symbol sitting exactly at the end of its frag
	 can be redundant.  */
      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward across empty frags to decide whether SYM is
	 superseded by a later mapping symbol or the end of section.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9840 #endif
9841
/* Adjust the symbol table.  A no-op for non-ELF output.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9854
/* Insert KEY -> VALUE into TABLE.  The final 0 argument to
   str_hash_insert requests non-replacing insertion, so an existing
   entry for KEY is left in place.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9860
/* Insert the system-register name KEY -> VALUE into TABLE, asserting
   first that the name fits within AARCH64_MAX_SYSREG_NAME_LEN.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9867
9868 static void
9869 fill_instruction_hash_table (void)
9870 {
9871 const aarch64_opcode *opcode = aarch64_opcode_table;
9872
9873 while (opcode->name != NULL)
9874 {
9875 templates *templ, *new_templ;
9876 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9877
9878 new_templ = XNEW (templates);
9879 new_templ->opcode = opcode;
9880 new_templ->next = NULL;
9881
9882 if (!templ)
9883 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9884 else
9885 {
9886 new_templ->next = templ->next;
9887 templ->next = new_templ;
9888 }
9889 ++opcode;
9890 }
9891 }
9892
9893 static inline void
9894 convert_to_upper (char *dst, const char *src, size_t num)
9895 {
9896 unsigned int i;
9897 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9898 *dst = TOUPPER (*src);
9899 *dst = '\0';
9900 }
9901
9902 /* Assume STR point to a lower-case string, allocate, convert and return
9903 the corresponding upper-case string. */
9904 static inline const char*
9905 get_upper_str (const char *str)
9906 {
9907 char *ret;
9908 size_t len = strlen (str);
9909 ret = XNEWVEC (char, len + 1);
9910 convert_to_upper (ret, str, len);
9911 return ret;
9912 }
9913
/* MD interface: Initialization.  Build every keyword hash table used by
   the parser, populate them from the static operand tables, then settle
   the active feature set and record the CPU type with BFD.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* One hash table per keyword namespace consulted during parsing.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  /* Mnemonic -> opcode-template chains.  */
  fill_instruction_hash_table ();

  /* System registers and PSTATE fields (NULL-name terminated tables).  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  /* IC / DC / AT / TLBI / SR operation name tables.  */
  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* Register names and NZCV condition-flag operands.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend modifiers, hashed in both lower and upper case.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.
     -mcpu wins over -march; fall back to the build default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if(ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#ifdef OBJ_ELF
  /* FIXME - is there a better way to do it ?  */
  aarch64_sframe_cfa_sp_reg = 31;
  aarch64_sframe_cfa_fp_reg = 29; /* x29.  */
  aarch64_sframe_cfa_ra_reg = 30;
#endif
}
10085
/* Command line processing.  */

/* Short options: only "-m<...>" is handled here.  */
const char *md_shortopts = "m:";

/* Endianness options: define only those that make sense for the
   configured target byte order (both for bi-endian builds).  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
10112
/* Description of a boolean command-line option that simply stores a
   value into an int variable.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Simple flag options handled by table lookup.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
10135
/* A named CPU and the feature set it implies.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Terminated by a NULL-name sentinel.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ALL_FEATURES, NULL},
  {"cortex-a34", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
					 SSBS), "Cortex-A76AE"},
  {"cortex-a77", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
				       SSBS), "Cortex-A77"},
  {"cortex-a65", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
				       SSBS), "Cortex-A65"},
  {"cortex-a65ae", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
					 SSBS), "Cortex-A65AE"},
  {"cortex-a78", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
				       SSBS, PROFILE), "Cortex-A78"},
  {"cortex-a78ae", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
					 SSBS, PROFILE), "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_CPU_FEATURES (V8_2A, 7, DOTPROD, F16, FLAGM,
					PAC, PROFILE, RCPC, SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
					SVE2_BITPERM), "Cortex-A510"},
  {"cortex-a520", AARCH64_CPU_FEATURES (V9_2A, 2, MEMTAG, SVE2_BITPERM),
   "Cortex-A520"},
  {"cortex-a710", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
					SVE2_BITPERM), "Cortex-A710"},
  {"cortex-a720", AARCH64_CPU_FEATURES (V9_2A, 3, MEMTAG, PROFILE,
					SVE2_BITPERM), "Cortex-A720"},
  {"ares", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
				 PROFILE), "Ares"},
  {"exynos-m1", AARCH64_CPU_FEATURES (V8A, 3, CRC, SHA2, AES),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_CPU_FEATURES (V8A, 4, CRC, SHA2, AES, RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
					SSBS), "Neoverse E1"},
  {"neoverse-n1", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
					PROFILE), "Neoverse N1"},
  {"neoverse-n2", AARCH64_CPU_FEATURES (V8_5A, 8, BFLOAT16, I8MM, F16,
					SVE, SVE2, SVE2_BITPERM, MEMTAG,
					RNG), "Neoverse N2"},
  {"neoverse-v1", AARCH64_CPU_FEATURES (V8_4A, 8, PROFILE, CVADP, SVE,
					SSBS, RNG, F16, BFLOAT16, I8MM),
   "Neoverse V1"},
  {"qdf24xx", AARCH64_CPU_FEATURES (V8A, 4, CRC, SHA2, AES, RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_CPU_FEATURES (V8_4A, 3, SHA2, AES, PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_CPU_FEATURES (V8A, 3, CRC, SHA2, AES),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_CPU_FEATURES (V8_1A, 2, SHA2, AES),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_FEATURES (V8A), "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_FEATURES (V8A), "APM X-Gene 1"},
  {"xgene2", AARCH64_CPU_FEATURES (V8A, 1, CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_FEATURES (V8R), "Cortex-R82"},
  {"cortex-x1", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
				      SSBS, PROFILE), "Cortex-X1"},
  {"cortex-x2", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
				      SVE2_BITPERM), "Cortex-X2"},
  {"cortex-x3", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
				      SVE2_BITPERM), "Cortex-X3"},
  {"cortex-x4", AARCH64_CPU_FEATURES (V9_2A, 3, MEMTAG, PROFILE,
				      SVE2_BITPERM), "Cortex-X4"},
  {"generic", AARCH64_ARCH_FEATURES (V8A), NULL},

  {NULL, AARCH64_NO_FEATURES, NULL}
};
10227
/* A named architecture level and the feature set it implies.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Terminated by a NULL-name sentinel.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ALL_FEATURES},
  {"armv8-a", AARCH64_ARCH_FEATURES (V8A)},
  {"armv8.1-a", AARCH64_ARCH_FEATURES (V8_1A)},
  {"armv8.2-a", AARCH64_ARCH_FEATURES (V8_2A)},
  {"armv8.3-a", AARCH64_ARCH_FEATURES (V8_3A)},
  {"armv8.4-a", AARCH64_ARCH_FEATURES (V8_4A)},
  {"armv8.5-a", AARCH64_ARCH_FEATURES (V8_5A)},
  {"armv8.6-a", AARCH64_ARCH_FEATURES (V8_6A)},
  {"armv8.7-a", AARCH64_ARCH_FEATURES (V8_7A)},
  {"armv8.8-a", AARCH64_ARCH_FEATURES (V8_8A)},
  {"armv8.9-a", AARCH64_ARCH_FEATURES (V8_9A)},
  {"armv8-r", AARCH64_ARCH_FEATURES (V8R)},
  {"armv9-a", AARCH64_ARCH_FEATURES (V9A)},
  {"armv9.1-a", AARCH64_ARCH_FEATURES (V9_1A)},
  {"armv9.2-a", AARCH64_ARCH_FEATURES (V9_2A)},
  {"armv9.3-a", AARCH64_ARCH_FEATURES (V9_3A)},
  {"armv9.4-a", AARCH64_ARCH_FEATURES (V9_4A)},
  {NULL, AARCH64_NO_FEATURES}
};
10256
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* Extension names accepted after '+' on -mcpu/-march and in
   .arch_extension, with the features each enables and the features it
   requires.  Terminated by a NULL-name sentinel.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc", AARCH64_FEATURE (CRC), AARCH64_NO_FEATURES},
  {"crypto", AARCH64_FEATURES (2, AES, SHA2),
   AARCH64_FEATURE (SIMD)},
  {"fp", AARCH64_FEATURE (FP), AARCH64_NO_FEATURES},
  {"lse", AARCH64_FEATURE (LSE), AARCH64_NO_FEATURES},
  {"lse128", AARCH64_FEATURE (LSE128), AARCH64_FEATURE (LSE)},
  {"simd", AARCH64_FEATURE (SIMD), AARCH64_FEATURE (FP)},
  {"pan", AARCH64_FEATURE (PAN), AARCH64_NO_FEATURES},
  {"lor", AARCH64_FEATURE (LOR), AARCH64_NO_FEATURES},
  {"ras", AARCH64_FEATURE (RAS), AARCH64_NO_FEATURES},
  {"rdma", AARCH64_FEATURE (RDMA), AARCH64_FEATURE (SIMD)},
  {"rdm", AARCH64_FEATURE (RDMA), AARCH64_FEATURE (SIMD)},
  {"fp16", AARCH64_FEATURE (F16), AARCH64_FEATURE (FP)},
  {"fp16fml", AARCH64_FEATURE (F16_FML), AARCH64_FEATURE (F16)},
  {"profile", AARCH64_FEATURE (PROFILE), AARCH64_NO_FEATURES},
  {"sve", AARCH64_FEATURE (SVE), AARCH64_FEATURE (COMPNUM)},
  {"tme", AARCH64_FEATURE (TME), AARCH64_NO_FEATURES},
  {"fcma", AARCH64_FEATURE (COMPNUM),
   AARCH64_FEATURES (2, F16, SIMD)},
  {"compnum", AARCH64_FEATURE (COMPNUM),
   AARCH64_FEATURES (2, F16, SIMD)},
  {"jscvt", AARCH64_FEATURE (JSCVT), AARCH64_FEATURE (FP)},
  {"rcpc", AARCH64_FEATURE (RCPC), AARCH64_NO_FEATURES},
  {"rcpc2", AARCH64_FEATURE (RCPC2), AARCH64_FEATURE (RCPC)},
  {"dotprod", AARCH64_FEATURE (DOTPROD), AARCH64_FEATURE (SIMD)},
  {"sha2", AARCH64_FEATURE (SHA2), AARCH64_FEATURE (FP)},
  {"frintts", AARCH64_FEATURE (FRINTTS), AARCH64_FEATURE (SIMD)},
  {"sb", AARCH64_FEATURE (SB), AARCH64_NO_FEATURES},
  {"predres", AARCH64_FEATURE (PREDRES), AARCH64_NO_FEATURES},
  {"predres2", AARCH64_FEATURE (PREDRES2), AARCH64_FEATURE (PREDRES)},
  {"aes", AARCH64_FEATURE (AES), AARCH64_FEATURE (SIMD)},
  {"sm4", AARCH64_FEATURE (SM4), AARCH64_FEATURE (SIMD)},
  {"sha3", AARCH64_FEATURE (SHA3), AARCH64_FEATURE (SHA2)},
  {"rng", AARCH64_FEATURE (RNG), AARCH64_NO_FEATURES},
  {"ssbs", AARCH64_FEATURE (SSBS), AARCH64_NO_FEATURES},
  {"memtag", AARCH64_FEATURE (MEMTAG), AARCH64_NO_FEATURES},
  {"sve2", AARCH64_FEATURE (SVE2), AARCH64_FEATURE (SVE)},
  {"sve2-sm4", AARCH64_FEATURE (SVE2_SM4),
   AARCH64_FEATURES (2, SVE2, SM4)},
  {"sve2-aes", AARCH64_FEATURE (SVE2_AES),
   AARCH64_FEATURES (2, SVE2, AES)},
  {"sve2-sha3", AARCH64_FEATURE (SVE2_SHA3),
   AARCH64_FEATURES (2, SVE2, SHA3)},
  {"sve2-bitperm", AARCH64_FEATURE (SVE2_BITPERM),
   AARCH64_FEATURE (SVE2)},
  {"sme", AARCH64_FEATURE (SME),
   AARCH64_FEATURES (2, SVE2, BFLOAT16)},
  {"sme-f64", AARCH64_FEATURE (SME_F64F64), AARCH64_FEATURE (SME)},
  {"sme-f64f64", AARCH64_FEATURE (SME_F64F64), AARCH64_FEATURE (SME)},
  {"sme-i64", AARCH64_FEATURE (SME_I16I64), AARCH64_FEATURE (SME)},
  {"sme-i16i64", AARCH64_FEATURE (SME_I16I64), AARCH64_FEATURE (SME)},
  {"sme2", AARCH64_FEATURE (SME2), AARCH64_FEATURE (SME)},
  {"bf16", AARCH64_FEATURE (BFLOAT16), AARCH64_FEATURE (FP)},
  {"i8mm", AARCH64_FEATURE (I8MM), AARCH64_FEATURE (SIMD)},
  {"f32mm", AARCH64_FEATURE (F32MM), AARCH64_FEATURE (SVE)},
  {"f64mm", AARCH64_FEATURE (F64MM), AARCH64_FEATURE (SVE)},
  {"ls64", AARCH64_FEATURE (LS64), AARCH64_NO_FEATURES},
  {"flagm", AARCH64_FEATURE (FLAGM), AARCH64_NO_FEATURES},
  {"flagm2", AARCH64_FEATURE (FLAGMANIP), AARCH64_FEATURE (FLAGM)},
  {"pauth", AARCH64_FEATURE (PAC), AARCH64_NO_FEATURES},
  {"mops", AARCH64_FEATURE (MOPS), AARCH64_NO_FEATURES},
  {"hbc", AARCH64_FEATURE (HBC), AARCH64_NO_FEATURES},
  {"cssc", AARCH64_FEATURE (CSSC), AARCH64_NO_FEATURES},
  {"chk", AARCH64_FEATURE (CHK), AARCH64_NO_FEATURES},
  {"gcs", AARCH64_FEATURE (GCS), AARCH64_NO_FEATURES},
  {"the", AARCH64_FEATURE (THE), AARCH64_NO_FEATURES},
  {"rasv2", AARCH64_FEATURE (RASv2), AARCH64_FEATURE (RAS)},
  {"ite", AARCH64_FEATURE (ITE), AARCH64_NO_FEATURES},
  {"d128", AARCH64_FEATURE (D128),
   AARCH64_FEATURE (LSE128)},
  {NULL, AARCH64_NO_FEATURES, AARCH64_NO_FEATURES},
};
10338
/* Description of a long command-line option whose suboption is parsed
   by a callback (e.g. -mcpu=, -march=).  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
10346
10347 /* Transitive closure of features depending on set. */
10348 static aarch64_feature_set
10349 aarch64_feature_disable_set (aarch64_feature_set set)
10350 {
10351 const struct aarch64_option_cpu_value_table *opt;
10352 aarch64_feature_set prev = AARCH64_NO_FEATURES;
10353
10354 while (!AARCH64_CPU_HAS_ALL_FEATURES (prev, set))
10355 {
10356 prev = set;
10357 for (opt = aarch64_features; opt->name != NULL; opt++)
10358 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10359 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10360 }
10361 return set;
10362 }
10363
10364 /* Transitive closure of dependencies of set. */
10365 static aarch64_feature_set
10366 aarch64_feature_enable_set (aarch64_feature_set set)
10367 {
10368 const struct aarch64_option_cpu_value_table *opt;
10369 aarch64_feature_set prev = AARCH64_NO_FEATURES;
10370
10371 while (!AARCH64_CPU_HAS_ALL_FEATURES (prev, set))
10372 {
10373 prev = set;
10374 for (opt = aarch64_features; opt->name != NULL; opt++)
10375 if (AARCH64_CPU_HAS_ALL_FEATURES (set, opt->value))
10376 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10377 }
10378 return set;
10379 }
10380
10381 static int
10382 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10383 bool ext_only)
10384 {
10385 /* We insist on extensions being added before being removed. We achieve
10386 this by using the ADDING_VALUE variable to indicate whether we are
10387 adding an extension (1) or removing it (0) and only allowing it to
10388 change in the order -1 -> 1 -> 0. */
10389 int adding_value = -1;
10390 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10391
10392 /* Copy the feature set, so that we can modify it. */
10393 *ext_set = **opt_p;
10394 *opt_p = ext_set;
10395
10396 while (str != NULL && *str != 0)
10397 {
10398 const struct aarch64_option_cpu_value_table *opt;
10399 const char *ext = NULL;
10400 int optlen;
10401
10402 if (!ext_only)
10403 {
10404 if (*str != '+')
10405 {
10406 as_bad (_("invalid architectural extension"));
10407 return 0;
10408 }
10409
10410 ext = strchr (++str, '+');
10411 }
10412
10413 if (ext != NULL)
10414 optlen = ext - str;
10415 else
10416 optlen = strlen (str);
10417
10418 if (optlen >= 2 && startswith (str, "no"))
10419 {
10420 if (adding_value != 0)
10421 adding_value = 0;
10422 optlen -= 2;
10423 str += 2;
10424 }
10425 else if (optlen > 0)
10426 {
10427 if (adding_value == -1)
10428 adding_value = 1;
10429 else if (adding_value != 1)
10430 {
10431 as_bad (_("must specify extensions to add before specifying "
10432 "those to remove"));
10433 return false;
10434 }
10435 }
10436
10437 if (optlen == 0)
10438 {
10439 as_bad (_("missing architectural extension"));
10440 return 0;
10441 }
10442
10443 gas_assert (adding_value != -1);
10444
10445 for (opt = aarch64_features; opt->name != NULL; opt++)
10446 if (optlen == (int) strlen(opt->name)
10447 && strncmp (opt->name, str, optlen) == 0)
10448 {
10449 aarch64_feature_set set;
10450
10451 /* Add or remove the extension. */
10452 if (adding_value)
10453 {
10454 set = aarch64_feature_enable_set (opt->value);
10455 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10456 }
10457 else
10458 {
10459 set = aarch64_feature_disable_set (opt->value);
10460 AARCH64_CLEAR_FEATURES (*ext_set, *ext_set, set);
10461 }
10462 break;
10463 }
10464
10465 if (opt->name == NULL)
10466 {
10467 as_bad (_("unknown architectural extension `%s'"), str);
10468 return 0;
10469 }
10470
10471 str = ext;
10472 };
10473
10474 return 1;
10475 }
10476
10477 static int
10478 aarch64_parse_cpu (const char *str)
10479 {
10480 const struct aarch64_cpu_option_table *opt;
10481 const char *ext = strchr (str, '+');
10482 size_t optlen;
10483
10484 if (ext != NULL)
10485 optlen = ext - str;
10486 else
10487 optlen = strlen (str);
10488
10489 if (optlen == 0)
10490 {
10491 as_bad (_("missing cpu name `%s'"), str);
10492 return 0;
10493 }
10494
10495 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10496 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10497 {
10498 mcpu_cpu_opt = &opt->value;
10499 if (ext != NULL)
10500 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10501
10502 return 1;
10503 }
10504
10505 as_bad (_("unknown cpu `%s'"), str);
10506 return 0;
10507 }
10508
10509 static int
10510 aarch64_parse_arch (const char *str)
10511 {
10512 const struct aarch64_arch_option_table *opt;
10513 const char *ext = strchr (str, '+');
10514 size_t optlen;
10515
10516 if (ext != NULL)
10517 optlen = ext - str;
10518 else
10519 optlen = strlen (str);
10520
10521 if (optlen == 0)
10522 {
10523 as_bad (_("missing architecture name `%s'"), str);
10524 return 0;
10525 }
10526
10527 for (opt = aarch64_archs; opt->name != NULL; opt++)
10528 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10529 {
10530 march_cpu_opt = &opt->value;
10531 if (ext != NULL)
10532 return aarch64_parse_features (ext, &march_cpu_opt, false);
10533
10534 return 1;
10535 }
10536
10537 as_bad (_("unknown architecture `%s'\n"), str);
10538 return 0;
10539 }
10540
/* ABIs.  */

/* Associates an -mabi= option name with its internal ABI enumerator.  */
struct aarch64_option_abi_value_table
{
  /* Name accepted after -mabi=.  */
  const char *name;
  /* Corresponding internal ABI value.  */
  enum aarch64_abi_type value;
};

/* ABIs selectable via -mabi=: ILP32 and LP64 on ELF targets, LLP64 on
   non-ELF targets.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10556
10557 static int
10558 aarch64_parse_abi (const char *str)
10559 {
10560 unsigned int i;
10561
10562 if (str[0] == '\0')
10563 {
10564 as_bad (_("missing abi name `%s'"), str);
10565 return 0;
10566 }
10567
10568 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10569 if (strcmp (str, aarch64_abis[i].name) == 0)
10570 {
10571 aarch64_abi = aarch64_abis[i].value;
10572 return 1;
10573 }
10574
10575 as_bad (_("unknown abi `%s'\n"), str);
10576 return 0;
10577 }
10578
/* Long options that carry an argument, e.g. -mabi=lp64.  Each entry
   gives the option name (including the trailing '='), its help string,
   the parser for the argument, and an optional deprecation message
   (NULL if the option is current).  Terminated by a NULL entry.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10588
/* Handle an AArch64-specific command-line option.  C is the option
   character and ARG its argument (NULL if none).  Return 1 if the
   option was recognized and consumed, 0 if it was not ours.  */

int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the table of simple (no-argument or fused-argument)
	 options.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then try the table of "-mfoo=bar" style options.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser on the text after the "=".  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10655
10656 void
10657 md_show_usage (FILE * fp)
10658 {
10659 struct aarch64_option_table *opt;
10660 struct aarch64_long_option_table *lopt;
10661
10662 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10663
10664 for (opt = aarch64_opts; opt->option != NULL; opt++)
10665 if (opt->help != NULL)
10666 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10667
10668 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10669 if (lopt->help != NULL)
10670 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10671
10672 #ifdef OPTION_EB
10673 fprintf (fp, _("\
10674 -EB assemble code for a big-endian cpu\n"));
10675 #endif
10676
10677 #ifdef OPTION_EL
10678 fprintf (fp, _("\
10679 -EL assemble code for a little-endian cpu\n"));
10680 #endif
10681 }
10682
10683 /* Parse a .cpu directive. */
10684
10685 static void
10686 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10687 {
10688 const struct aarch64_cpu_option_table *opt;
10689 char saved_char;
10690 char *name;
10691 char *ext;
10692 size_t optlen;
10693
10694 name = input_line_pointer;
10695 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10696 saved_char = *input_line_pointer;
10697 *input_line_pointer = 0;
10698
10699 ext = strchr (name, '+');
10700
10701 if (ext != NULL)
10702 optlen = ext - name;
10703 else
10704 optlen = strlen (name);
10705
10706 /* Skip the first "all" entry. */
10707 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10708 if (strlen (opt->name) == optlen
10709 && strncmp (name, opt->name, optlen) == 0)
10710 {
10711 mcpu_cpu_opt = &opt->value;
10712 if (ext != NULL)
10713 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10714 return;
10715
10716 cpu_variant = *mcpu_cpu_opt;
10717
10718 *input_line_pointer = saved_char;
10719 demand_empty_rest_of_line ();
10720 return;
10721 }
10722 as_bad (_("unknown cpu `%s'"), name);
10723 *input_line_pointer = saved_char;
10724 ignore_rest_of_line ();
10725 }
10726
10727
10728 /* Parse a .arch directive. */
10729
10730 static void
10731 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10732 {
10733 const struct aarch64_arch_option_table *opt;
10734 char saved_char;
10735 char *name;
10736 char *ext;
10737 size_t optlen;
10738
10739 name = input_line_pointer;
10740 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10741 saved_char = *input_line_pointer;
10742 *input_line_pointer = 0;
10743
10744 ext = strchr (name, '+');
10745
10746 if (ext != NULL)
10747 optlen = ext - name;
10748 else
10749 optlen = strlen (name);
10750
10751 /* Skip the first "all" entry. */
10752 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10753 if (strlen (opt->name) == optlen
10754 && strncmp (name, opt->name, optlen) == 0)
10755 {
10756 mcpu_cpu_opt = &opt->value;
10757 if (ext != NULL)
10758 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10759 return;
10760
10761 cpu_variant = *mcpu_cpu_opt;
10762
10763 *input_line_pointer = saved_char;
10764 demand_empty_rest_of_line ();
10765 return;
10766 }
10767
10768 as_bad (_("unknown architecture `%s'\n"), name);
10769 *input_line_pointer = saved_char;
10770 ignore_rest_of_line ();
10771 }
10772
10773 /* Parse a .arch_extension directive. */
10774
10775 static void
10776 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10777 {
10778 char saved_char;
10779 char *ext = input_line_pointer;
10780
10781 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10782 saved_char = *input_line_pointer;
10783 *input_line_pointer = 0;
10784
10785 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10786 return;
10787
10788 cpu_variant = *mcpu_cpu_opt;
10789
10790 *input_line_pointer = saved_char;
10791 demand_empty_rest_of_line ();
10792 }
10793
/* Copy symbol information.  */

/* Copy the AArch64-specific symbol flag word from SRC to DEST, so that
   DEST carries the same target-specific attributes as SRC.  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10801
10802 #ifdef OBJ_ELF
10803 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10804 This is needed so AArch64 specific st_other values can be independently
10805 specified for an IFUNC resolver (that is called by the dynamic linker)
10806 and the symbol it resolves (aliased to the resolver). In particular,
10807 if a function symbol has special st_other value set via directives,
10808 then attaching an IFUNC resolver to that symbol should not override
10809 the st_other setting. Requiring the directive on the IFUNC resolver
10810 symbol would be unexpected and problematic in C code, where the two
10811 symbols appear as two independent function declarations. */
10812
10813 void
10814 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10815 {
10816 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10817 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10818 /* If size is unset, copy size from src. Because we don't track whether
10819 .size has been used, we can't differentiate .size dest, 0 from the case
10820 where dest's size is unset. */
10821 if (!destelf->size && S_GET_SIZE (dest) == 0)
10822 {
10823 if (srcelf->size)
10824 {
10825 destelf->size = XNEW (expressionS);
10826 *destelf->size = *srcelf->size;
10827 }
10828 S_SET_SIZE (dest, S_GET_SIZE (src));
10829 }
10830 }
10831 #endif