/* Extracted from binutils-gdb: gas/config/tc-aarch64.c
   (snapshot including "aarch64: Add support for FEAT_SVE2p1").  */
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2024 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 #define streq(a, b) (strcmp (a, b) == 0)
42
43 #define END_OF_INSN '\0'
44
45 static aarch64_feature_set cpu_variant;
46
47 /* Variables that we set while parsing command-line options. Once all
48 options have been read we re-process these values to set the real
49 assembly flags. */
50 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
51 static const aarch64_feature_set *march_cpu_opt = NULL;
52
53 /* Constants for known architecture features. */
54 static const aarch64_feature_set cpu_default = AARCH64_ARCH_FEATURES (V8A);
55
56 /* Currently active instruction sequence. */
57 static aarch64_instr_sequence *insn_sequence = NULL;
58
59 #ifdef OBJ_ELF
60 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
61 static symbolS *GOT_symbol;
62 #endif
63
/* Which ABI to use.  */
enum aarch64_abi_type
{
  /* No explicit ABI selected.  */
  AARCH64_ABI_NONE = 0,
  /* 64-bit int, long and pointers (see the comment above ilp32_p).  */
  AARCH64_ABI_LP64 = 1,
  /* 32-bit int, long and pointers.  */
  AARCH64_ABI_ILP32 = 2,
  /* 32-bit int and long, but 64-bit pointers (see llp64_p).  */
  AARCH64_ABI_LLP64 = 3
};
72
73 unsigned int aarch64_sframe_cfa_sp_reg;
74 /* The other CFA base register for SFrame stack trace info. */
75 unsigned int aarch64_sframe_cfa_fp_reg;
76 unsigned int aarch64_sframe_cfa_ra_reg;
77
78 #ifndef DEFAULT_ARCH
79 #define DEFAULT_ARCH "aarch64"
80 #endif
81
82 #ifdef OBJ_ELF
83 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
84 static const char *default_arch = DEFAULT_ARCH;
85 #endif
86
87 /* AArch64 ABI for the output file. */
88 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
89
90 /* When non-zero, program to a 32-bit model, in which the C data types
91 int, long and all pointer types are 32-bit objects (ILP32); or to a
92 64-bit model, in which the C int type is 32-bits but the C long type
93 and all pointer types are 64-bit objects (LP64). */
94 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
95
96 /* When non zero, C types int and long are 32 bit,
97 pointers, however are 64 bit */
98 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
99
100 enum vector_el_type
101 {
102 NT_invtype = -1,
103 NT_b,
104 NT_h,
105 NT_s,
106 NT_d,
107 NT_q,
108 NT_zero,
109 NT_merge
110 };
111
112 /* Bits for DEFINED field in vector_type_el. */
113 #define NTA_HASTYPE 1
114 #define NTA_HASINDEX 2
115 #define NTA_HASVARWIDTH 4
116
/* Parsed form of a vector arrangement or element suffix (".4s", ".d[1]",
   "/z", ...).  */
struct vector_type_el
{
  /* Element type (NT_b..NT_q), or NT_zero/NT_merge for the SVE
     predicate "/z" and "/m" suffixes.  */
  enum vector_el_type type;
  /* Mask of NTA_HAS* flags saying which of the fields below were
     supplied by the source.  */
  unsigned char defined;
  /* Element size; only meaningful when NTA_HASVARWIDTH is set.
     NOTE(review): units not evident in this chunk -- confirm at the
     setters.  */
  unsigned element_size;
  /* Number of elements in the arrangement (e.g. 4 for ".4s").  */
  unsigned width;
  /* Element index; only meaningful when NTA_HASINDEX is set.  */
  int64_t index;
};
125
126 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
127
/* Relocation state accumulated for the instruction being assembled.  */
struct reloc
{
  /* BFD relocation code to emit.  */
  bfd_reloc_code_real_type type;
  /* The expression the relocation applies to.  */
  expressionS exp;
  /* Nonzero if the relocation is PC-relative.  */
  int pc_rel;
  /* The operand the relocation is attached to.  */
  enum aarch64_opnd opnd;
  /* FIXUP_F_* flags (e.g. FIXUP_F_HAS_EXPLICIT_SHIFT).  */
  uint32_t flags;
  /* Whether resolving this fixup needs help from libopcodes.  */
  unsigned need_libopcodes_p : 1;
};
137
/* All the state gathered while assembling a single instruction.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  aarch64_operand_error parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};
151
152 typedef struct aarch64_instruction aarch64_instruction;
153
154 static aarch64_instruction inst;
155
156 static bool parse_operands (char *, const aarch64_opcode *);
157 static bool programmer_friendly_fixup (aarch64_instruction *);
158
159 /* If an AARCH64_OPDE_SYNTAX_ERROR has no error string, its first three
160 data fields contain the following information:
161
162 data[0].i:
163 A mask of register types that would have been acceptable as bare
164 operands, outside of a register list. In addition, SEF_DEFAULT_ERROR
   is set if a general parsing error occurred for an operand (that is,
166 an error not related to registers, and having no error string).
167
168 data[1].i:
169 A mask of register types that would have been acceptable inside
170 a register list. In addition, SEF_IN_REGLIST is set if the
171 operand contained a '{' and if we got to the point of trying
172 to parse a register inside a list.
173
174 data[2].i:
175 The mask associated with the register that was actually seen, or 0
176 if none. A nonzero value describes a register inside a register
177 list if data[1].i & SEF_IN_REGLIST, otherwise it describes a bare
178 register.
179
180 The idea is that stringless errors from multiple opcode templates can
181 be ORed together to give a summary of the available alternatives. */
182 #define SEF_DEFAULT_ERROR (1U << 31)
183 #define SEF_IN_REGLIST (1U << 31)
184
185 /* Diagnostics inline function utilities.
186
187 These are lightweight utilities which should only be called by parse_operands
188 and other parsers. GAS processes each assembly line by parsing it against
189 instruction template(s), in the case of multiple templates (for the same
190 mnemonic name), those templates are tried one by one until one succeeds or
191 all fail. An assembly line may fail a few templates before being
192 successfully parsed; an error saved here in most cases is not a user error
193 but an error indicating the current template is not the right template.
194 Therefore it is very important that errors can be saved at a low cost during
195 the parsing; we don't want to slow down the whole parsing by recording
196 non-user errors in detail.
197
198 Remember that the objective is to help GAS pick up the most appropriate
199 error message in the case of multiple templates, e.g. FMOV which has 8
200 templates. */
201
/* Reset the per-instruction diagnostic to the "no error" state.  */

static inline void
clear_error (void)
{
  memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
  /* Mark the slot empty explicitly rather than relying on the value
     of AARCH64_OPDE_NIL.  */
  inst.parsing_error.kind = AARCH64_OPDE_NIL;
}
208
/* Return TRUE if a diagnostic is currently recorded.  */

static inline bool
error_p (void)
{
  /* AARCH64_OPDE_NIL means "no diagnostic recorded".  */
  return inst.parsing_error.kind != AARCH64_OPDE_NIL;
}
214
/* Record a diagnostic of kind KIND with (possibly NULL) message ERROR,
   unconditionally replacing any previously recorded diagnostic.  The
   operand index starts out as -1, i.e. not tied to a particular
   operand.  */

static inline void
set_error (enum aarch64_operand_error_kind kind, const char *error)
{
  memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
  inst.parsing_error.index = -1;
  inst.parsing_error.kind = kind;
  inst.parsing_error.error = error;
}
223
/* Record ERROR as an AARCH64_OPDE_RECOVERABLE diagnostic.  */

static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
229
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
  /* Flag this as a generic (stringless) parse failure; see the comment
     above SEF_DEFAULT_ERROR for how data[0] is interpreted.  */
  inst.parsing_error.data[0].i = SEF_DEFAULT_ERROR;
}
238
/* Record ERROR as a syntax error, replacing any earlier diagnostic.  */

static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
244
/* Record ERROR as a syntax error, but only if no diagnostic has been
   recorded yet; the earliest error is usually the most meaningful.  */

static inline void
set_first_syntax_error (const char *error)
{
  if (! error_p ())
    set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
251
/* Record ERROR as an AARCH64_OPDE_FATAL_SYNTAX_ERROR diagnostic,
   replacing any earlier diagnostic.  */

static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
257 \f
258 /* Return value for certain parsers when the parsing fails; those parsers
259 return the information of the parsed result, e.g. register number, on
260 success. */
261 #define PARSE_FAIL -1
262
263 /* This is an invalid condition code that means no conditional field is
264 present. */
265 #define COND_ALWAYS 0x10
266
/* A named constant and the value it encodes to; used for NZCV flag
   operands (entries live in aarch64_nzcv_hsh).  */
typedef struct
{
  const char *template;
  uint32_t value;
} asm_nzcv;
272
/* Maps a relocation-modifier name to the BFD relocation it selects.  */
struct reloc_entry
{
  char *name;
  bfd_reloc_code_real_type reloc;
};
278
279 /* Macros to define the register types and masks for the purpose
280 of parsing. */
281
282 #undef AARCH64_REG_TYPES
283 #define AARCH64_REG_TYPES \
284 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
285 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
286 BASIC_REG_TYPE(SP_32) /* wsp */ \
287 BASIC_REG_TYPE(SP_64) /* sp */ \
288 BASIC_REG_TYPE(ZR_32) /* wzr */ \
289 BASIC_REG_TYPE(ZR_64) /* xzr */ \
290 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
291 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
292 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
293 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
294 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
295 BASIC_REG_TYPE(V) /* v[0-31] */ \
296 BASIC_REG_TYPE(Z) /* z[0-31] */ \
297 BASIC_REG_TYPE(P) /* p[0-15] */ \
298 BASIC_REG_TYPE(PN) /* pn[0-15] */ \
299 BASIC_REG_TYPE(ZA) /* za */ \
300 BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
301 BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
302 BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
303 BASIC_REG_TYPE(ZT0) /* zt0 */ \
304 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
305 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
306 /* Typecheck: same, plus SVE registers. */ \
307 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
308 | REG_TYPE(Z)) \
309 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
310 MULTI_REG_TYPE(R_ZR, REG_TYPE(R_32) | REG_TYPE(R_64) \
311 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
312 /* Typecheck: same, plus SVE registers. */ \
313 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
314 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) \
315 | REG_TYPE(Z)) \
316 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
317 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
318 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
319 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
320 MULTI_REG_TYPE(R_ZR_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
321 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
322 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
323 /* Typecheck: any [BHSDQ]P FP. */ \
324 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
325 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
326 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
327 MULTI_REG_TYPE(R_ZR_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
328 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
329 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
330 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
331 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
332 be used for SVE instructions, since Zn and Pn are valid symbols \
333 in other contexts. */ \
334 MULTI_REG_TYPE(R_ZR_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
335 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
336 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
337 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
338 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
339 | REG_TYPE(Z) | REG_TYPE(P)) \
340 /* Likewise, but with predicate-as-counter registers added. */ \
341 MULTI_REG_TYPE(R_ZR_SP_BHSDQ_VZP_PN, REG_TYPE(R_32) | REG_TYPE(R_64) \
342 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
343 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
344 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
345 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
346 | REG_TYPE(Z) | REG_TYPE(P) | REG_TYPE(PN)) \
347 /* Any integer register; used for error messages only. */ \
348 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
349 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
350 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
351 /* Any vector register. */ \
352 MULTI_REG_TYPE(VZ, REG_TYPE(V) | REG_TYPE(Z)) \
353 /* An SVE vector or predicate register. */ \
354 MULTI_REG_TYPE(ZP, REG_TYPE(Z) | REG_TYPE(P)) \
355 /* Any vector or predicate register. */ \
356 MULTI_REG_TYPE(VZP, REG_TYPE(V) | REG_TYPE(Z) | REG_TYPE(P)) \
357 /* The whole of ZA or a single tile. */ \
358 MULTI_REG_TYPE(ZA_ZAT, REG_TYPE(ZA) | REG_TYPE(ZAT)) \
359 /* A horizontal or vertical slice of a ZA tile. */ \
360 MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV)) \
361 /* Pseudo type to mark the end of the enumerator sequence. */ \
362 END_REG_TYPE(MAX)
363
364 #undef BASIC_REG_TYPE
365 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
366 #undef MULTI_REG_TYPE
367 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
368 #undef END_REG_TYPE
369 #define END_REG_TYPE(T) BASIC_REG_TYPE(T)
370
371 /* Register type enumerators. */
372 typedef enum aarch64_reg_type_
373 {
374 /* A list of REG_TYPE_*. */
375 AARCH64_REG_TYPES
376 } aarch64_reg_type;
377
378 #undef BASIC_REG_TYPE
379 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
380 #undef REG_TYPE
381 #define REG_TYPE(T) (1 << REG_TYPE_##T)
382 #undef MULTI_REG_TYPE
383 #define MULTI_REG_TYPE(T,V) V,
384 #undef END_REG_TYPE
385 #define END_REG_TYPE(T) 0
386
387 /* Structure for a hash table entry for a register. */
388 typedef struct
389 {
390 const char *name;
391 unsigned char number;
392 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
393 unsigned char builtin;
394 } reg_entry;
395
396 /* Values indexed by aarch64_reg_type to assist the type checking. */
397 static const unsigned reg_type_masks[] =
398 {
399 AARCH64_REG_TYPES
400 };
401
402 #undef BASIC_REG_TYPE
403 #undef REG_TYPE
404 #undef MULTI_REG_TYPE
405 #undef END_REG_TYPE
406 #undef AARCH64_REG_TYPES
407
/* We expected one of the registers in MASK to be specified.  If a register
   of some kind was specified, SEEN is a mask that contains that register,
   otherwise it is zero.

   If it is possible to provide a relatively pithy message that describes
   the error exactly, return a string that does so, reporting the error
   against "operand %d".  Return null otherwise.

   From a QoI perspective, any REG_TYPE_* that is passed as the first
   argument to set_expected_reg_error should generally have its own message.
   Providing messages for combinations of such REG_TYPE_*s can be useful if
   it is possible to summarize the combination in a relatively natural way.
   On the other hand, it seems better to avoid long lists of unrelated
   things.  */

static const char *
get_reg_expected_msg (unsigned int mask, unsigned int seen)
{
  /* First handle messages that use SEEN.  */
  if ((mask & reg_type_masks[REG_TYPE_ZAT])
      && (seen & reg_type_masks[REG_TYPE_ZATHV]))
    return N_("expected an unsuffixed ZA tile at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_ZATHV])
      && (seen & reg_type_masks[REG_TYPE_ZAT]))
    return N_("missing horizontal or vertical suffix at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_ZA])
      && (seen & (reg_type_masks[REG_TYPE_ZAT]
		  | reg_type_masks[REG_TYPE_ZATHV])))
    return N_("expected 'za' rather than a ZA tile at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_PN])
      && (seen & reg_type_masks[REG_TYPE_P]))
    return N_("expected a predicate-as-counter rather than predicate-as-mask"
	      " register at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_P])
      && (seen & reg_type_masks[REG_TYPE_PN]))
    return N_("expected a predicate-as-mask rather than predicate-as-counter"
	      " register at operand %d");

  /* Integer, zero and stack registers.  */
  if (mask == reg_type_masks[REG_TYPE_R_64])
    return N_("expected a 64-bit integer register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_R_ZR])
    return N_("expected an integer or zero register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_R_SP])
    return N_("expected an integer or stack pointer register at operand %d");

  /* Floating-point and SIMD registers.  */
  if (mask == reg_type_masks[REG_TYPE_BHSDQ])
    return N_("expected a scalar SIMD or floating-point register"
	      " at operand %d");
  if (mask == reg_type_masks[REG_TYPE_V])
    return N_("expected an Advanced SIMD vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_Z])
    return N_("expected an SVE vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_P]
      || mask == (reg_type_masks[REG_TYPE_P] | reg_type_masks[REG_TYPE_PN]))
    /* Use this error for "predicate-as-mask only" and "either kind of
       predicate".  We report a more specific error if P is used where
       PN is expected, and vice versa, so the issue at this point is
       "predicate-like" vs. "not predicate-like".  */
    return N_("expected an SVE predicate register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_PN])
    return N_("expected an SVE predicate-as-counter register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_VZ])
    return N_("expected a vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZP])
    return N_("expected an SVE vector or predicate register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_VZP])
    return N_("expected a vector or predicate register at operand %d");

  /* SME-related registers.  */
  if (mask == reg_type_masks[REG_TYPE_ZA])
    return N_("expected a ZA array vector at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_ZA_ZAT] | reg_type_masks[REG_TYPE_ZT0]))
    return N_("expected ZT0 or a ZA mask at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZAT])
    return N_("expected a ZA tile at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZATHV])
    return N_("expected a ZA tile slice at operand %d");

  /* Integer and vector combos.  */
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_V]))
    return N_("expected an integer register or Advanced SIMD vector register"
	      " at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_Z]))
    return N_("expected an integer register or SVE vector register"
	      " at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZ]))
    return N_("expected an integer or vector register at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_P]))
    return N_("expected an integer or predicate register at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZP]))
    return N_("expected an integer, vector or predicate register"
	      " at operand %d");

  /* SVE and SME combos.  */
  if (mask == (reg_type_masks[REG_TYPE_Z] | reg_type_masks[REG_TYPE_ZATHV]))
    return N_("expected an SVE vector register or ZA tile slice"
	      " at operand %d");

  /* No pithy summary applies; the caller falls back on a more generic
     diagnostic.  */
  return NULL;
}
514
515 /* Record that we expected a register of type TYPE but didn't see one.
516 REG is the register that we actually saw, or null if we didn't see a
517 recognized register. FLAGS is SEF_IN_REGLIST if we are parsing the
518 contents of a register list, otherwise it is zero. */
519
520 static inline void
521 set_expected_reg_error (aarch64_reg_type type, const reg_entry *reg,
522 unsigned int flags)
523 {
524 assert (flags == 0 || flags == SEF_IN_REGLIST);
525 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
526 if (flags & SEF_IN_REGLIST)
527 inst.parsing_error.data[1].i = reg_type_masks[type] | flags;
528 else
529 inst.parsing_error.data[0].i = reg_type_masks[type];
530 if (reg)
531 inst.parsing_error.data[2].i = reg_type_masks[reg->type];
532 }
533
/* Record that we expected a register list containing registers of type TYPE,
   but didn't see the opening '{'.  If we saw a register instead, REG is the
   register that we saw, otherwise it is null.  */

static inline void
set_expected_reglist_error (aarch64_reg_type type, const reg_entry *reg)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
  /* The expected-in-list mask goes in data[1], without SEF_IN_REGLIST
     since we never got inside a list.  */
  inst.parsing_error.data[1].i = reg_type_masks[type];
  if (reg)
    inst.parsing_error.data[2].i = reg_type_masks[reg->type];
}
546
547 /* Some well known registers that we refer to directly elsewhere. */
548 #define REG_SP 31
549 #define REG_ZR 31
550
551 /* Instructions take 4 bytes in the object file. */
552 #define INSN_SIZE 4
553
554 static htab_t aarch64_ops_hsh;
555 static htab_t aarch64_cond_hsh;
556 static htab_t aarch64_shift_hsh;
557 static htab_t aarch64_sys_regs_hsh;
558 static htab_t aarch64_pstatefield_hsh;
559 static htab_t aarch64_sys_regs_ic_hsh;
560 static htab_t aarch64_sys_regs_dc_hsh;
561 static htab_t aarch64_sys_regs_at_hsh;
562 static htab_t aarch64_sys_regs_tlbi_hsh;
563 static htab_t aarch64_sys_regs_sr_hsh;
564 static htab_t aarch64_reg_hsh;
565 static htab_t aarch64_barrier_opt_hsh;
566 static htab_t aarch64_nzcv_hsh;
567 static htab_t aarch64_pldop_hsh;
568 static htab_t aarch64_hint_opt_hsh;
569
570 /* Stuff needed to resolve the label ambiguity
571 As:
572 ...
573 label: <insn>
574 may differ from:
575 ...
576 label:
577 <insn> */
578
579 static symbolS *last_label_seen;
580
581 /* Literal pool structure. Held on a per-section
582 and per-sub-section basis. */
583
584 #define MAX_LITERAL_POOL_SIZE 1024
/* One entry in a literal pool.  */
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;
591
/* A collection of pending literals, kept per section and
   per-sub-section.  */
typedef struct literal_pool
{
  /* The pending pool entries.  */
  literal_expression literals[MAX_LITERAL_POOL_SIZE];
  /* Index of the first free slot in LITERALS.  */
  unsigned int next_free_entry;
  /* Identifier of this pool.  NOTE(review): presumably used to name the
     pool's label -- confirm at the use sites.  */
  unsigned int id;
  /* Symbol marking the pool's location.  */
  symbolS *symbol;
  /* Section and sub-section the pool belongs to.  */
  segT section;
  subsegT sub_section;
  /* Size of the pool entries.  NOTE(review): units/meaning not evident
     in this chunk -- confirm at the use sites.  */
  int size;
  /* Next pool in the list headed by list_of_pools.  */
  struct literal_pool *next;
} literal_pool;
603
604 /* Pointer to a linked list of literal pools. */
605 static literal_pool *list_of_pools = NULL;
606 \f
607 /* Pure syntax. */
608
609 /* This array holds the chars that always start a comment. If the
610 pre-processor is disabled, these aren't very useful. */
611 const char comment_chars[] = "";
612
613 /* This array holds the chars that only start a comment at the beginning of
614 a line. If the line seems to have the form '# 123 filename'
615 .line and .file directives will appear in the pre-processed output. */
616 /* Note that input_file.c hand checks for '#' at the beginning of the
617 first line of the input file. This is because the compiler outputs
618 #NO_APP at the beginning of its output. */
619 /* Also note that comments like this one will always work. */
620 const char line_comment_chars[] = "#";
621
622 const char line_separator_chars[] = ";";
623
624 /* Chars that can be used to separate mant
625 from exp in floating point numbers. */
626 const char EXP_CHARS[] = "eE";
627
628 /* Chars that mean this number is a floating point constant. */
629 /* As in 0f12.456 */
630 /* or 0d1.2345e12 */
631
632 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
633
634 /* Prefix character that indicates the start of an immediate value. */
635 #define is_immediate_prefix(C) ((C) == '#')
636
637 /* Separator character handling. */
638
639 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
640
/* If the next character of *STR is C, consume it and return TRUE;
   otherwise leave *STR alone and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
652
653 #define skip_past_comma(str) skip_past_char (str, ',')
654
655 /* Arithmetic expressions (possibly involving symbols). */
656
657 static bool in_aarch64_get_expression = false;
658
659 /* Third argument to aarch64_get_expression. */
660 #define GE_NO_PREFIX false
661 #define GE_OPT_PREFIX true
662
663 /* Fourth argument to aarch64_get_expression. */
664 #define ALLOW_ABSENT false
665 #define REJECT_ABSENT true
666
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.  */

static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression parser works on input_line_pointer, so
     temporarily redirect it to *STR; it is restored on every exit
     path below.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Tell md_operand to mark unparsable operands O_illegal.  */
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
733
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Delegate to the shared IEEE encoder, honouring target endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
744
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * exp)
{
  /* Only intervene while aarch64_get_expression is active; setting
     O_illegal makes that function report "bad expression" against the
     current instruction.  */
  if (in_aarch64_get_expression)
    exp->X_op = O_illegal;
}
753
754 /* Immediate values. */
755
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful.  Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *error)
{
  /* Keep only the earliest diagnostic.  */
  if (! error_p ())
    set_syntax_error (error);
}
767
/* Similar to first_error, but this function accepts formatted error
   message.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  /* As with first_error, only the earliest diagnostic is kept.  */
  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* Messages are expected to fit in BUFFER; truncation (ret >= size)
	 or an encoding error (ret < 0) is a programming error.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
792
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL
   (and records a "bad vector arrangement type" error) on failure.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Narrowest vector-register qualifier for each element type; wider
     arrangements sit at a fixed offset from this base (see below).  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* The SVE predicate suffixes /z and /m were parsed as NT_zero and
     NT_merge respectively.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register: the S_B..S_Q qualifiers parallel
	 NT_b..NT_q.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-bit, 64-bit and 128-bit arrangements are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
867
868 /* Register parsing. */
869
870 /* Generic register parser which is called by other specialized
871 register parsers.
872 CCP points to what should be the beginning of a register name.
873 If it is indeed a valid register name, advance CCP over it and
874 return the reg_entry structure; otherwise return NULL.
875 It does not issue diagnostics. */
876
877 static reg_entry *
878 parse_reg (char **ccp)
879 {
880 char *start = *ccp;
881 char *p;
882 reg_entry *reg;
883
884 #ifdef REGISTER_PREFIX
885 if (*start != REGISTER_PREFIX)
886 return NULL;
887 start++;
888 #endif
889
890 p = start;
891 if (!ISALPHA (*p) || !is_name_beginner (*p))
892 return NULL;
893
894 do
895 p++;
896 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
897
898 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
899
900 if (!reg)
901 return NULL;
902
903 *ccp = p;
904 return reg;
905 }
906
/* Return the operand qualifier associated with all uses of REG, or
   AARCH64_OPND_QLF_NIL if none.  AARCH64_OPND_QLF_NIL means either
   that qualifiers don't apply to REG or that qualifiers are added
   using suffixes.  */

static aarch64_opnd_qualifier_t
inherent_reg_qualifier (const reg_entry *reg)
{
  switch (reg->type)
    {
    /* 32-bit integer registers (Wn, WSP, WZR).  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_ZR_32:
      return AARCH64_OPND_QLF_W;

    /* 64-bit integer registers (Xn, SP, XZR).  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_ZR_64:
      return AARCH64_OPND_QLF_X;

    /* Scalar FP/SIMD registers; S_B..S_Q parallel FP_B..FP_Q (the
       enumerators are kept consecutive), so index arithmetic picks
       the matching qualifier.  */
    case REG_TYPE_FP_B:
    case REG_TYPE_FP_H:
    case REG_TYPE_FP_S:
    case REG_TYPE_FP_D:
    case REG_TYPE_FP_Q:
      return AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);

    default:
      return AARCH64_OPND_QLF_NIL;
    }
}
938
/* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
   return FALSE.  TYPE may be a multi-register class, in which case
   any constituent basic type is accepted.  */
static bool
aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
{
  /* reg_type_masks[type] is the bitmask of acceptable basic types.  */
  return (reg_type_masks[type] & (1 << reg->type)) != 0;
}
946
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    case REG_TYPE_Z:
      /* An SVE Zn base/offset is only accepted when REG_TYPE allows it,
	 and it must carry an explicit ".s" or ".d" suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_Z)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      /* Anything else must be a 32-bit or 64-bit integer register,
	 including the zero register and SP.  */
      if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR_SP))
	return NULL;
      *qualifier = inherent_reg_qualifier (reg);
      break;
    }

  *ccp = str;

  return reg;
}
995
996 /* Try to parse a base or offset register. Return the register entry
997 on success, setting *QUALIFIER to the register qualifier. Return null
998 otherwise.
999
1000 Note that this function does not issue any diagnostics. */
1001
1002 static const reg_entry *
1003 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
1004 {
1005 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_ZR_SP, qualifier);
1006 }
1007
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* Only AdvSIMD V registers take a numeric lane count; for all other
     register types (SVE Z, predicates, ...) the width is implicit.  */
  if (reg_type != REG_TYPE_V || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  /* Map the element-size letter onto its type and size in bits.  */
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only valid for V registers, and only as ".q" or ".1q"
	 (a 128-bit element fills the whole vector).  */
      if (reg_type != REG_TYPE_V || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* An explicit lane count must describe a 64- or 128-bit vector,
     except for the short 2h and 4b forms used by some instructions.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;
  parsed_type->element_size = element_size;

  *str = ptr;

  return true;
}
1094
1095 /* *STR contains an SVE zero/merge predication suffix. Parse it into
1096 *PARSED_TYPE and point *STR at the end of the suffix. */
1097
1098 static bool
1099 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
1100 {
1101 char *ptr = *str;
1102
1103 /* Skip '/'. */
1104 gas_assert (*ptr == '/');
1105 ptr++;
1106 switch (TOLOWER (*ptr))
1107 {
1108 case 'z':
1109 parsed_type->type = NT_zero;
1110 break;
1111 case 'm':
1112 parsed_type->type = NT_merge;
1113 break;
1114 default:
1115 if (*ptr != '\0' && *ptr != ',')
1116 first_error_fmt (_("unexpected character `%c' in predication type"),
1117 *ptr);
1118 else
1119 first_error (_("missing predication type"));
1120 return false;
1121 }
1122 parsed_type->width = 0;
1123 *str = ptr + 1;
1124 return true;
1125 }
1126
1127 /* Return true if CH is a valid suffix character for registers of
1128 type TYPE. */
1129
1130 static bool
1131 aarch64_valid_suffix_char_p (aarch64_reg_type type, char ch)
1132 {
1133 switch (type)
1134 {
1135 case REG_TYPE_V:
1136 case REG_TYPE_Z:
1137 case REG_TYPE_ZA:
1138 case REG_TYPE_ZAT:
1139 case REG_TYPE_ZATH:
1140 case REG_TYPE_ZATV:
1141 return ch == '.';
1142
1143 case REG_TYPE_P:
1144 case REG_TYPE_PN:
1145 return ch == '.' || ch == '/';
1146
1147 default:
1148 return false;
1149 }
1150 }
1151
1152 /* Parse an index expression at *STR, storing it in *IMM on success. */
1153
1154 static bool
1155 parse_index_expression (char **str, int64_t *imm)
1156 {
1157 expressionS exp;
1158
1159 aarch64_get_expression (&exp, str, GE_NO_PREFIX, REJECT_ABSENT);
1160 if (exp.X_op != O_constant)
1161 {
1162 first_error (_("constant expression required"));
1163 return false;
1164 }
1165 *imm = exp.X_add_number;
1166 return true;
1167 }
1168
/* Parse a register of the type TYPE.

   Return null if the string pointed to by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register, and optionally return the register
   shape and element index information in *TYPEINFO.

   FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list.

   FLAGS includes PTR_FULL_REG if the function should ignore any potential
   register index.

   FLAGS includes PTR_GOOD_MATCH if we are sufficiently far into parsing
   an operand that we can be confident that it is a good match.  */

#define PTR_IN_REGLIST (1U << 0)
#define PTR_FULL_REG (1U << 1)
#define PTR_GOOD_MATCH (1U << 2)

static const reg_entry *
parse_typed_reg (char **ccp, aarch64_reg_type type,
		 struct vector_type_el *typeinfo, unsigned int flags)
{
  char *str = *ccp;
  /* Remember whether the input even started like an identifier; used
     to pick a better diagnostic for malformed register lists below.  */
  bool is_alpha = ISALPHA (*str);
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;
  unsigned int err_flags = (flags & PTR_IN_REGLIST) ? SEF_IN_REGLIST : 0;

  /* Start from an empty shape: no type, no width, no index.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.element_size = 0;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      if (!is_alpha && (flags & PTR_IN_REGLIST))
	set_fatal_syntax_error (_("syntax error in register list"));
      else if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }
  /* From here on, work with the register's own (more specific) type
     rather than the class the caller asked for.  */
  type = reg->type;

  if (aarch64_valid_suffix_char_p (reg->type, *str))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return NULL;
	  /* ZA tile numbers are bounded by the element size: e.g. only
	     ZA0.B exists, while .D tiles go up to ZA7.D.  */
	  if ((reg->type == REG_TYPE_ZAT
	       || reg->type == REG_TYPE_ZATH
	       || reg->type == REG_TYPE_ZATV)
	      && reg->number * 8U >= parsetype.element_size)
	    {
	      set_syntax_error (_("ZA tile number out of range"));
	      return NULL;
	    }
	}
      else
	{
	  /* '/' suffix: an SVE /z or /m predication qualifier.  */
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return NULL;
	}

      /* Register if of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type != REG_TYPE_V)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
    {
      /* Reject Sn[index] syntax.  */
      if (reg->type != REG_TYPE_Z
	  && reg->type != REG_TYPE_PN
	  && reg->type != REG_TYPE_ZT0
	  && !is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return NULL;
	}

      if (flags & PTR_IN_REGLIST)
	{
	  first_error (_("index not allowed inside register list"));
	  return NULL;
	}

      atype.defined |= NTA_HASINDEX;

      if (!parse_index_expression (&str, &atype.index))
	return NULL;

      if (! skip_past_char (&str, ']'))
	return NULL;
    }
  /* Inside a register list, the index (if any) follows the list, so a
     missing index is only an error outside a list.  */
  else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return NULL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_V && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  *ccp = str;

  return reg;
}
1321
1322 /* Parse register.
1323
1324 Return the register on success; return null otherwise.
1325
1326 If this is a NEON vector register with additional type information, fill
1327 in the struct pointed to by VECTYPE (if non-NULL).
1328
1329 This parser does not handle register lists. */
1330
1331 static const reg_entry *
1332 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1333 struct vector_type_el *vectype)
1334 {
1335 return parse_typed_reg (ccp, type, vectype, 0);
1336 }
1337
1338 static inline bool
1339 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1340 {
1341 return (e1.type == e2.type
1342 && e1.defined == e2.defined
1343 && e1.width == e2.width
1344 && e1.element_size == e2.element_size
1345 && e1.index == e2.index);
1346 }
1347
1348 /* Return the register number mask for registers of type REG_TYPE. */
1349
1350 static inline int
1351 reg_type_mask (aarch64_reg_type reg_type)
1352 {
1353 return reg_type == REG_TYPE_P ? 15 : 31;
1354 }
1355
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |  0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  uint32_t val, val_range, mask;
  int in_range;
  int ret_val;
  bool error = false;
  bool expect_index = false;
  unsigned int ptr_flags = PTR_IN_REGLIST;

  if (*str != '{')
    {
      set_expected_reglist_error (type, parse_reg (&str));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.element_size = 0;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1u;
  val_range = -1u;
  in_range = 0;
  mask = reg_type_mask (type);
  do
    {
      /* IN_RANGE is set when the loop condition below saw a '-', i.e.
	 we are now parsing the second half of "Va-Vb".  */
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
					      ptr_flags);
      if (!reg)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      val = reg->number;
      /* reject [bhsd]n */
      if (type == REG_TYPE_V && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* "Va-Va" is rejected; the range end must differ from the
	     start (wrap-around such as {v31-v0} is allowed).  */
	  if (val == val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range = (val_range + 1) & mask;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Emit every register from VAL_RANGE up to VAL (inclusive,
	 modulo the bank size) into the encoded result, 5 bits each.  */
      if (! error)
	for (;;)
	  {
	    ret_val |= val_range << ((5 * nb_regs) & 31);
	    nb_regs++;
	    if (val_range == val)
	      break;
	    val_range = (val_range + 1) & mask;
	  }
      in_range = 0;
      ptr_flags |= PTR_GOOD_MATCH;
    }
  /* Note the comma operator: a '-' both sets IN_RANGE and continues
     the loop.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* If any element carried a type that requires an index, the index
     follows the closing brace and applies to the whole list.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  if (!parse_index_expression (&str, &typeinfo_first.index))
	    error = true;
	  if (! skip_past_char (&str, ']'))
	    error = true;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1515
/* Directives: register aliases.  */

/* Insert an alias STR for register NUMBER of type TYPE into the
   register hash table.  Return the new entry on success.  Return NULL
   if STR already names a register, warning unless the existing entry
   denotes the same register.  STR is copied; the caller retains
   ownership of it.  */

static reg_entry *
insert_reg_alias (char *str, int number, aarch64_reg_type type)
{
  reg_entry *new;
  const char *name;

  if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
    {
      if (new->builtin)
	as_warn (_("ignoring attempt to redefine built-in register '%s'"),
		 str);

      /* Only warn about a redefinition if it's not defined as the
	 same register.  */
      else if (new->number != number || new->type != type)
	as_warn (_("ignoring redefinition of register alias '%s'"), str);

      return NULL;
    }

  name = xstrdup (str);
  new = XNEW (reg_entry);

  new->name = name;
  new->number = number;
  new->type = type;
  /* Aliases created here can later be removed by .unreq, unlike the
     built-in register names.  */
  new->builtin = false;

  str_hash_insert (aarch64_reg_hsh, name, new, 0);

  return new;
}
1550
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.

   NEWNAME points to the alias being defined and P to the text
   following it on the line.  Besides the alias as written, an
   all-uppercase and an all-lowercase variant are registered too.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Still return true: this was a .req, just a bad one.  */
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Skip the uppercase variant if it is identical to the alias as
	 written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  /* insert_reg_alias copies the name, so NBUF is always ours to free.  */
  free (nbuf);
  return true;
}
1630
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  The directive is
   registered only so that a stray leading ".req" produces a sensible
   diagnostic instead of "unknown pseudo-op".  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1638
1639 /* The .unreq directive deletes an alias which was previously defined
1640 by .req. For example:
1641
1642 my_alias .req r11
1643 .unreq my_alias */
1644
1645 static void
1646 s_unreq (int a ATTRIBUTE_UNUSED)
1647 {
1648 char *name;
1649 char saved_char;
1650
1651 name = input_line_pointer;
1652 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1653 saved_char = *input_line_pointer;
1654 *input_line_pointer = 0;
1655
1656 if (!*name)
1657 as_bad (_("invalid syntax for .unreq directive"));
1658 else
1659 {
1660 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1661
1662 if (!reg)
1663 as_bad (_("unknown register alias '%s'"), name);
1664 else if (reg->builtin)
1665 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1666 name);
1667 else
1668 {
1669 char *p;
1670 char *nbuf;
1671
1672 str_hash_delete (aarch64_reg_hsh, name);
1673 free ((char *) reg->name);
1674 free (reg);
1675
1676 /* Also locate the all upper case and all lower case versions.
1677 Do not complain if we cannot find one or the other as it
1678 was probably deleted above. */
1679
1680 nbuf = strdup (name);
1681 for (p = nbuf; *p; p++)
1682 *p = TOUPPER (*p);
1683 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1684 if (reg)
1685 {
1686 str_hash_delete (aarch64_reg_hsh, nbuf);
1687 free ((char *) reg->name);
1688 free (reg);
1689 }
1690
1691 for (p = nbuf; *p; p++)
1692 *p = TOLOWER (*p);
1693 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1694 if (reg)
1695 {
1696 str_hash_delete (aarch64_reg_hsh, nbuf);
1697 free ((char *) reg->name);
1698 free (reg);
1699 }
1700
1701 free (nbuf);
1702 }
1703 }
1704
1705 *input_line_pointer = saved_char;
1706 demand_empty_rest_of_line ();
1707 }
1708
1709 /* Directives: Instruction set selection. */
1710
1711 #if defined OBJ_ELF || defined OBJ_COFF
1712 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1713 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1714 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1715 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1716
/* Create a new mapping symbol ($x for code, $d for data) for the
   transition to STATE, at offset VALUE within FRAG.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Discard the superseded symbol from the global chain.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag must be created in address
	 order.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1772
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Mark BYTES bytes starting at offset VALUE in FRAG as data ($d), and
   place a mapping symbol for STATE immediately after them.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The removed symbol was also the frag's first mapping
	     symbol; clear that record too.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* $d at the start of the padding, then the requested state after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1800
1801 static void mapping_state_2 (enum mstate state, int max_chars);
1802
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* The bytes before the first instruction are implicitly data;
	 mark them with $d at the start of the section.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Finally emit the mapping symbol for STATE itself.  */
  mapping_state_2 (state, 0);
}
1844
1845 /* Same as mapping_state, but MAX_CHARS bytes have already been
1846 allocated. Put the mapping symbol that far back. */
1847
1848 static void
1849 mapping_state_2 (enum mstate state, int max_chars)
1850 {
1851 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1852
1853 if (!SEG_NORMAL (now_seg))
1854 return;
1855
1856 if (mapstate == state)
1857 /* The mapping symbol has already been emitted.
1858 There is nothing else to do. */
1859 return;
1860
1861 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1862 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1863 }
1864 #else
1865 #define mapping_state(x) /* nothing */
1866 #define mapping_state_2(x, y) /* nothing */
1867 #endif
1868
/* Directives: alignment.  */

/* Implement the .even directive: align the current section to a
   2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1882
1883 /* Directives: Literal pools. */
1884
1885 static literal_pool *
1886 find_literal_pool (int size)
1887 {
1888 literal_pool *pool;
1889
1890 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1891 {
1892 if (pool->section == now_seg
1893 && pool->sub_section == now_subseg && pool->size == size)
1894 break;
1895 }
1896
1897 return pool;
1898 }
1899
1900 static literal_pool *
1901 find_or_make_literal_pool (int size)
1902 {
1903 /* Next literal pool ID number. */
1904 static unsigned int latest_pool_num = 1;
1905 literal_pool *pool;
1906
1907 pool = find_literal_pool (size);
1908
1909 if (pool == NULL)
1910 {
1911 /* Create a new pool. */
1912 pool = XNEW (literal_pool);
1913 if (!pool)
1914 return NULL;
1915
1916 /* Currently we always put the literal pool in the current text
1917 section. If we were generating "small" model code where we
1918 knew that all code and initialised data was within 1MB then
1919 we could output literals to mergeable, read-only data
1920 sections. */
1921
1922 pool->next_free_entry = 0;
1923 pool->section = now_seg;
1924 pool->sub_section = now_subseg;
1925 pool->size = size;
1926 pool->next = list_of_pools;
1927 pool->symbol = NULL;
1928
1929 /* Add it to the list. */
1930 list_of_pools = pool;
1931 }
1932
1933 /* New pools, and emptied pools, will have a NULL symbol. */
1934 if (pool->symbol == NULL)
1935 {
1936 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1937 &zero_address_frag, 0);
1938 pool->id = latest_pool_num++;
1939 }
1940
1941 /* Done. */
1942 return pool;
1943 }
1944
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  On success *EXP is
   rewritten in place to refer to the pool entry (pool symbol plus
   byte offset), ready to be used as a PC-relative load target.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match on symbol(s) and addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP as pool-symbol + offset-of-entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
2004
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Give the previously-created SYMBOLP its name, segment, value and
   frag, and append it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns stable
     storage, independent of the caller's buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt the output.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
2055
2056
/* Implement the .ltorg directive: dump every non-empty literal pool
   (4-byte and 8-byte and 16-byte) into the current section at the
   current location, then mark the pools as empty so they can be
   refilled.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* align = 2, 3, 4 covers pool entry sizes 4, 8 and 16 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* Literal data needs a $d mapping symbol.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 in the name keeps it out of the user namespace.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Bind the pool's placeholder symbol to this spot; loads that
	 referenced the pool now resolve here.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
2115
2116 #if defined(OBJ_ELF) || defined(OBJ_COFF)
2117 /* Forward declarations for functions below, in the MD interface
2118 section. */
2119 static struct reloc_table_entry * find_reloc_table_entry (char **);
2120
2121 /* Directives: Data. */
2122 /* N.B. the support for relocation suffix in this directive needs to be
2123 implemented properly. */
2124
2125 static void
2126 s_aarch64_cons (int nbytes)
2127 {
2128 expressionS exp;
2129
2130 #ifdef md_flush_pending_output
2131 md_flush_pending_output ();
2132 #endif
2133
2134 if (is_it_end_of_statement ())
2135 {
2136 demand_empty_rest_of_line ();
2137 return;
2138 }
2139
2140 #ifdef md_cons_align
2141 md_cons_align (nbytes);
2142 #endif
2143
2144 mapping_state (MAP_DATA);
2145 do
2146 {
2147 struct reloc_table_entry *reloc;
2148
2149 expression (&exp);
2150
2151 if (exp.X_op != O_symbol)
2152 emit_expr (&exp, (unsigned int) nbytes);
2153 else
2154 {
2155 skip_past_char (&input_line_pointer, '#');
2156 if (skip_past_char (&input_line_pointer, ':'))
2157 {
2158 reloc = find_reloc_table_entry (&input_line_pointer);
2159 if (reloc == NULL)
2160 as_bad (_("unrecognized relocation suffix"));
2161 else
2162 as_bad (_("unimplemented relocation suffix"));
2163 ignore_rest_of_line ();
2164 return;
2165 }
2166 else
2167 emit_expr (&exp, (unsigned int) nbytes);
2168 }
2169 }
2170 while (*input_line_pointer++ == ',');
2171
2172 /* Put terminator back into stream. */
2173 input_line_pointer--;
2174 demand_empty_rest_of_line ();
2175 }
2176 #endif
2177
2178 #ifdef OBJ_ELF
2179 /* Forward declarations for functions below, in the MD interface
2180 section. */
2181 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
2182
2183 /* Mark symbol that it follows a variant PCS convention. */
2184
2185 static void
2186 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
2187 {
2188 char *name;
2189 char c;
2190 symbolS *sym;
2191 asymbol *bfdsym;
2192 elf_symbol_type *elfsym;
2193
2194 c = get_symbol_name (&name);
2195 if (!*name)
2196 as_bad (_("Missing symbol name in directive"));
2197 sym = symbol_find_or_make (name);
2198 restore_line_pointer (c);
2199 demand_empty_rest_of_line ();
2200 bfdsym = symbol_get_bfdsym (sym);
2201 elfsym = elf_symbol_from (bfdsym);
2202 gas_assert (elfsym);
2203 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2204 }
2205 #endif /* OBJ_ELF */
2206
2207 /* Output a 32-bit word, but mark as an instruction. */
2208
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;	/* Count of 32-bit words emitted so far.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* ".inst" with no operands is a no-op.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed. */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  /* Each comma-separated operand must be a compile-time constant and is
     emitted as one instruction word.  */
  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* On big-endian targets, byte-swap the value so the emitted bytes
	 keep instruction byte order.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Record the emitted words for DWARF line-number information.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream. */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2263
2264 static void
2265 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2266 {
2267 demand_empty_rest_of_line ();
2268 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2269 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2270 }
2271
2272 #ifdef OBJ_ELF
2273 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2274
2275 static void
2276 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2277 {
2278 expressionS exp;
2279
2280 expression (&exp);
2281 frag_grow (4);
2282 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2283 BFD_RELOC_AARCH64_TLSDESC_ADD);
2284
2285 demand_empty_rest_of_line ();
2286 }
2287
2288 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2289
2290 static void
2291 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2292 {
2293 expressionS exp;
2294
2295 /* Since we're just labelling the code, there's no need to define a
2296 mapping symbol. */
2297 expression (&exp);
2298 /* Make sure there is enough room in this frag for the following
2299 blr. This trick only works if the blr follows immediately after
2300 the .tlsdesc directive. */
2301 frag_grow (4);
2302 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2303 BFD_RELOC_AARCH64_TLSDESC_CALL);
2304
2305 demand_empty_rest_of_line ();
2306 }
2307
2308 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2309
2310 static void
2311 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2312 {
2313 expressionS exp;
2314
2315 expression (&exp);
2316 frag_grow (4);
2317 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2318 BFD_RELOC_AARCH64_TLSDESC_LDR);
2319
2320 demand_empty_rest_of_line ();
2321 }
2322 #endif /* OBJ_ELF */
2323
2324 #ifdef TE_PE
2325 static void
2326 s_secrel (int dummy ATTRIBUTE_UNUSED)
2327 {
2328 expressionS exp;
2329
2330 do
2331 {
2332 expression (&exp);
2333 if (exp.X_op == O_symbol)
2334 exp.X_op = O_secrel;
2335
2336 emit_expr (&exp, 4);
2337 }
2338 while (*input_line_pointer++ == ',');
2339
2340 input_line_pointer--;
2341 demand_empty_rest_of_line ();
2342 }
2343
2344 void
2345 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
2346 {
2347 expressionS exp;
2348
2349 exp.X_op = O_secrel;
2350 exp.X_add_symbol = symbol;
2351 exp.X_add_number = 0;
2352 emit_expr (&exp, size);
2353 }
2354
2355 static void
2356 s_secidx (int dummy ATTRIBUTE_UNUSED)
2357 {
2358 expressionS exp;
2359
2360 do
2361 {
2362 expression (&exp);
2363 if (exp.X_op == O_symbol)
2364 exp.X_op = O_secidx;
2365
2366 emit_expr (&exp, 2);
2367 }
2368 while (*input_line_pointer++ == ',');
2369
2370 input_line_pointer--;
2371 demand_empty_rest_of_line ();
2372 }
2373 #endif /* TE_PE */
2374
2375 static void s_aarch64_arch (int);
2376 static void s_aarch64_cpu (int);
2377 static void s_aarch64_arch_extension (int);
2378
2379 /* This table describes all the machine specific pseudo-ops the assembler
2380 has to support. The fields are:
2381 pseudo-op name without dot
2382 function to call to execute this pseudo-op
2383 Integer arg to pass to the function. */
2384
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line. */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"even", s_even, 0},
  /* ".pool" is an alias for ".ltorg"; both flush the literal pools.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Target-selection directives.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  /* Emit raw instruction words.  */
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* Data directives; the argument is the operand size in bytes.  */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  /* Half-precision and brain floating-point literal directives; the
     argument is the float_cons format letter.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2417 \f
2418
2419 /* Check whether STR points to a register name followed by a comma or the
2420 end of line; REG_TYPE indicates which register types are checked
2421 against. Return TRUE if STR is such a register name; otherwise return
2422 FALSE. The function does not intend to produce any diagnostics, but since
2423 the register parser aarch64_reg_parse, which is called by this function,
2424 does produce diagnostics, we call clear_error to clear any diagnostics
2425 that may be generated by aarch64_reg_parse.
2426 Also, the function returns FALSE directly if there is any user error
2427 present at the function entry. This prevents the existing diagnostics
2428 state from being spoiled.
2429 The function currently serves parse_constant_immediate and
2430 parse_big_immediate only. */
2431 static bool
2432 reg_name_p (char *str, aarch64_reg_type reg_type)
2433 {
2434 const reg_entry *reg;
2435
2436 /* Prevent the diagnostics state from being spoiled. */
2437 if (error_p ())
2438 return false;
2439
2440 reg = aarch64_reg_parse (&str, reg_type, NULL);
2441
2442 /* Clear the parsing error that may be set by the reg parser. */
2443 clear_error ();
2444
2445 if (!reg)
2446 return false;
2447
2448 skip_whitespace (str);
2449 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2450 return true;
2451
2452 return false;
2453 }
2454
2455 /* Parser functions used exclusively in instruction operands. */
2456
2457 /* Parse an immediate expression which may not be constant.
2458
2459 To prevent the expression parser from pushing a register name
2460 into the symbol table as an undefined symbol, firstly a check is
2461 done to find out whether STR is a register of type REG_TYPE followed
2462 by a comma or the end of line. Return FALSE if STR is such a string. */
2463
2464 static bool
2465 parse_immediate_expression (char **str, expressionS *exp,
2466 aarch64_reg_type reg_type)
2467 {
2468 if (reg_name_p (*str, reg_type))
2469 {
2470 set_recoverable_error (_("immediate operand required"));
2471 return false;
2472 }
2473
2474 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2475
2476 if (exp->X_op == O_absent)
2477 {
2478 set_fatal_syntax_error (_("missing immediate expression"));
2479 return false;
2480 }
2481
2482 return true;
2483 }
2484
2485 /* Constant immediate-value read function for use in insn parsing.
2486 STR points to the beginning of the immediate (with the optional
2487 leading #); *VAL receives the value. REG_TYPE says which register
2488 names should be treated as registers rather than as symbolic immediates.
2489
2490 Return TRUE on success; otherwise return FALSE. */
2491
2492 static bool
2493 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2494 {
2495 expressionS exp;
2496
2497 if (! parse_immediate_expression (str, &exp, reg_type))
2498 return false;
2499
2500 if (exp.X_op != O_constant)
2501 {
2502 set_syntax_error (_("constant expression required"));
2503 return false;
2504 }
2505
2506 *val = exp.X_add_number;
2507 return true;
2508 }
2509
/* Fold a 32-bit IEEE754 single-precision pattern IMM down to the
   AArch64 8-bit floating-point immediate encoding: the sign bit 31
   maps to bit 7 and bits 25:19 map to bits 6:0.  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0] */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31] -> b[7] */

  return sign | low7;
}
2516
2517 /* Return TRUE if the single-precision floating-point value encoded in IMM
2518 can be expressed in the AArch64 8-bit signed floating-point format with
2519 3-bit exponent and normalized 4 bits of precision; in other words, the
2520 floating-point value must be expressable as
2521 (+/-) n / 16 * power (2, r)
2522 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2523
static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* A representable single-precision value has the bit pattern

       3 32222222 2221111111111
       1 09876543 21098765432109876543210
       n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are 0 or 1 independently, and E == ~e.  */

  /* The low 19 fraction bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 29:25 must equal the complement of bit 30.  */
  uint32_t expected = ((imm >> 30) & 0x1) == 0 ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected;
}
2549
2550 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2551 as an IEEE float without any loss of precision. Store the value in
2552 *FPWORD if so. */
2553
static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* A double-precision pattern fits in a float exactly when

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
     if Eeee_eeee != 1111_1111

     where n, e, s and S are 0 or 1 independently and ~ is the inverse
     of E.  */

  uint32_t hi = imm >> 32;
  uint32_t lo = (uint32_t) imm;

  /* The bottom 29 significand bits would be discarded; they must be 0.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* The three bits below the exponent's top bit must replicate its
     complement, i.e. E~~~ must match this pattern.  */
  uint32_t expected = ((hi >> 30) & 0x1) == 0 ? 0x38000000 : 0x40000000;
  if ((hi & 0x78000000) != expected)
    return false;

  /* Reject exponents that would overflow the float's exponent field
     (Eeee_eeee == 1111_1111).  */
  if ((hi & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (hi & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	    | ((hi << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	    | (lo >> 29);		/* 3 S bits.  */
  return true;
}
2597
2598 /* Return true if we should treat OPERAND as a double-precision
2599 floating-point operand rather than a single-precision one. */
static bool
double_precision_operand_p (const aarch64_opnd_info *operand)
{
  /* Check for unsuffixed SVE registers, which are allowed
     for LDR and STR but not in instructions that require an
     immediate.  We get better error messages if we arbitrarily
     pick one size, parse the immediate normally, and then
     report the match failure in the normal way.  */
  return (operand->qualifier == AARCH64_OPND_QLF_NIL
	  || aarch64_get_qualifier_esize (operand->qualifier) == 8);
	  /* An 8-byte element size selects double precision.  */
}
2611
2612 /* Parse a floating-point immediate. Return TRUE on success and return the
2613 value in *IMMED in the format of IEEE754 single-precision encoding.
2614 *CCP points to the start of the string; DP_P is TRUE when the immediate
2615 is expected to be in double-precision (N.B. this only matters when
2616 hexadecimal representation is involved). REG_TYPE says which register
2617 names should be treated as registers rather than as symbolic immediates.
2618
2619 This routine accepts any IEEE float; it is up to the callers to reject
2620 invalid ones. */
2621
static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;	/* True once a raw 0x encoding has been accepted.  */

  /* Skip the optional leading '#'.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit encoding to a float; fail if any
	     precision would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* Too wide for a 32-bit single-precision pattern.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name is not acceptable here.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Parse a decimal floating-point literal into littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  /* Success: hand back the single-precision pattern and the updated
     input position.  */
  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2687
2688 /* Less-generic immediate-value read function with the possibility of loading
2689 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2690 instructions.
2691
2692 To prevent the expression parser from pushing a register name into the
2693 symbol table as an undefined symbol, a check is firstly done to find
2694 out whether STR is a register of type REG_TYPE followed by a comma or
2695 the end of line. Return FALSE if STR is such a register. */
2696
static bool
parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
{
  char *ptr = *str;

  /* A bare register name is not a valid immediate.  */
  if (reg_name_p (ptr, reg_type))
    {
      set_syntax_error (_("immediate operand required"));
      return false;
    }

  /* Parse into the global inst.reloc.exp so that a non-constant
     expression remains available for later fixup processing.  */
  aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);

  /* NOTE: *IMM is written only for constant expressions.  For any other
     expression kind the function still returns true and the caller must
     consult inst.reloc.exp instead.  */
  if (inst.reloc.exp.X_op == O_constant)
    *imm = inst.reloc.exp.X_add_number;

  *str = ptr;

  return true;
}
2717
2718 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2719 if NEED_LIBOPCODES is non-zero, the fixup will need
2720 assistance from the libopcodes. */
2721
2722 static inline void
2723 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2724 const aarch64_opnd_info *operand,
2725 int need_libopcodes_p)
2726 {
2727 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2728 reloc->opnd = operand->type;
2729 if (need_libopcodes_p)
2730 reloc->need_libopcodes_p = 1;
2731 };
2732
2733 /* Return TRUE if the instruction needs to be fixed up later internally by
2734 the GAS; otherwise return FALSE. */
2735
static inline bool
aarch64_gas_internal_fixup_p (void)
{
  /* This marker is set on inst.reloc by aarch64_set_gas_internal_fixup.  */
  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
2741
2742 /* Assign the immediate value to the relevant field in *OPERAND if
2743 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2744 needs an internal fixup in a later stage.
2745 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2746 IMM.VALUE that may get assigned with the constant. */
2747 static inline void
2748 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2749 aarch64_opnd_info *operand,
2750 int addr_off_p,
2751 int need_libopcodes_p,
2752 int skip_p)
2753 {
2754 if (reloc->exp.X_op == O_constant)
2755 {
2756 if (addr_off_p)
2757 operand->addr.offset.imm = reloc->exp.X_add_number;
2758 else
2759 operand->imm.value = reloc->exp.X_add_number;
2760 reloc->type = BFD_RELOC_UNUSED;
2761 }
2762 else
2763 {
2764 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2765 /* Tell libopcodes to ignore this operand or not. This is helpful
2766 when one of the operands needs to be fixed up later but we need
2767 libopcodes to check the other operands. */
2768 operand->skip = skip_p;
2769 }
2770 }
2771
2772 /* Relocation modifiers. Each entry in the table contains the textual
2773 name for the relocation which may be placed before a symbol used as
2774 a load/store offset, or add immediate. It must be surrounded by a
2775 leading and trailing colon, for example:
2776
2777 ldr x0, [x1, #:rello:varsym]
2778 add x0, x1, #:rello:varsym */
2779
struct reloc_table_entry
{
  const char *name;	/* Modifier text, without the surrounding colons.  */
  int pc_rel;		/* Non-zero for PC-relative relocations.  */
  /* Relocation to use for each instruction class the modifier may be
     applied to; 0 where the combination is not supported.  */
  bfd_reloc_code_real_type adr_type;		/* ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* ADRP.  */
  bfd_reloc_code_real_type movw_type;		/* MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;		/* LDR/STR offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* LDR (literal).  */
};
2791
static struct reloc_table_entry reloc_table[] =
{
  /* Each entry lists: name, pc_rel, then the relocation for ADR, ADRP,
     MOVZ/MOVN/MOVK, ADD, LDR/STR and LDR-literal in that order; a zero
     means the modifier cannot be used with that instruction class.  */

  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation. */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3320
3321 /* Given the address of a pointer pointing to the textual name of a
3322 relocation as may appear in assembler source, attempt to find its
3323 details in reloc_table. The pointer will be updated to the character
3324 after the trailing colon. On failure, NULL will be returned;
3325 otherwise return the reloc_table_entry. */
3326
3327 static struct reloc_table_entry *
3328 find_reloc_table_entry (char **str)
3329 {
3330 unsigned int i;
3331 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3332 {
3333 int length = strlen (reloc_table[i].name);
3334
3335 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3336 && (*str)[length] == ':')
3337 {
3338 *str += (length + 1);
3339 return &reloc_table[i];
3340 }
3341 }
3342
3343 return NULL;
3344 }
3345
/* Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK.

   TYPE is a BFD_RELOC_AARCH64_* code taken from a fixup's fx_r_type;
   callers (see aarch64_force_relocation below) treat -1 as "defer to
   the generic heuristic".  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    /* GOT-, TLS- and low-12-bit-offset relocations: the final values
       depend on link-time layout, so they must always be emitted.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      return -1;
    }
}
3448
3449 int
3450 aarch64_force_relocation (struct fix *fixp)
3451 {
3452 int res = aarch64_force_reloc (fixp->fx_r_type);
3453
3454 if (res == -1)
3455 return generic_force_reloc (fixp);
3456 return res;
3457 }
3458
/* Mode argument to parse_shift and parser_shifter_operand.  Selects
   which shift/extend operators (and shift-amount forms) a particular
   operand position accepts.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3473
/* Parse a <shift> operator on an AArch64 data processing instruction.

   *STR points at the shift mnemonic; OPERAND->shifter receives the
   parsed kind, amount, and presence flags; MODE restricts which
   operators and amount forms are acceptable (see enum parse_shift_mode).

   On success, advance *STR past the parsed text and return TRUE;
   otherwise record a syntax error and return FALSE.  */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan the alphabetic operator name (e.g. "lsl", "uxtw", "mul").  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the name up in the table of known shift/extend operators.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only meaningful for the SHIFTED_LSL_MSL (SIMD modified
     immediate) mode.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  /* Likewise MUL only appears in the SVE MUL #n / MUL VL modes.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Check the operator against what this operand position allows.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  An amount is absent for "MUL VL" and for a
     register-offset shifter that is immediately closed by ']'.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* A missing amount is only OK for extend operators written
	 without a '#' prefix (e.g. "uxtw" alone).  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3646
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only arithmetic and logical immediate modes reach here; anything
     else cannot be a shifted immediate.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  The value (or symbolic reloc) is
     left in inst.reloc.exp for later validation.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Not accept any shifter for logical immediate values.  A shifter
     that parses successfully after the comma is the error case here;
     if parse_shift fails instead, we fall through and the leftover
     text is diagnosed by the caller.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3688
3689 /* Parse a <shifter_operand> for a data processing instruction:
3690
3691 <Rm>
3692 <Rm>, <shift>
3693 #<immediate>
3694 #<immediate>, LSL #imm
3695
3696 where <shift> is handled by parse_shift above, and the last two
3697 cases are handled by the function above.
3698
3699 Validation of immediate operands is deferred to md_apply_fix.
3700
3701 Return TRUE on success; otherwise return FALSE. */
3702
3703 static bool
3704 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3705 enum parse_shift_mode mode)
3706 {
3707 const reg_entry *reg;
3708 aarch64_opnd_qualifier_t qualifier;
3709 enum aarch64_operand_class opd_class
3710 = aarch64_get_operand_class (operand->type);
3711
3712 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3713 if (reg)
3714 {
3715 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3716 {
3717 set_syntax_error (_("unexpected register in the immediate operand"));
3718 return false;
3719 }
3720
3721 if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR))
3722 {
3723 set_expected_reg_error (REG_TYPE_R_ZR, reg, 0);
3724 return false;
3725 }
3726
3727 operand->reg.regno = reg->number;
3728 operand->qualifier = qualifier;
3729
3730 /* Accept optional shift operation on register. */
3731 if (! skip_past_comma (str))
3732 return true;
3733
3734 if (! parse_shift (str, operand, mode))
3735 return false;
3736
3737 return true;
3738 }
3739 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3740 {
3741 set_syntax_error
3742 (_("integer register expected in the extended/shifted operand "
3743 "register"));
3744 return false;
3745 }
3746
3747 /* We have a shifted immediate variable. */
3748 return parse_shifter_operand_imm (str, operand, mode);
3749 }
3750
/* Parse a shifter operand that may be prefixed by a relocation
   modifier ("#:rello:" or ":rello:"); anything without that prefix is
   punted to parse_shifter_operand.
   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step over the "#:" or ":" prefix.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3812
3813 /* Parse all forms of an address expression. Information is written
3814 to *OPERAND and/or inst.reloc.
3815
3816 The A64 instruction set has the following addressing modes:
3817
3818 Offset
3819 [base] // in SIMD ld/st structure
3820 [base{,#0}] // in ld/st exclusive
3821 [base{,#imm}]
3822 [base,Xm{,LSL #imm}]
3823 [base,Xm,SXTX {#imm}]
3824 [base,Wm,(S|U)XTW {#imm}]
3825 Pre-indexed
3826 [base]! // in ldraa/ldrab exclusive
3827 [base,#imm]!
3828 Post-indexed
3829 [base],#imm
3830 [base],Xm // in SIMD ld/st structure
3831 PC-relative (literal)
3832 label
3833 SVE:
3834 [base,#imm,MUL VL]
3835 [base,Zm.D{,LSL #imm}]
3836 [base,Zm.S,(S|U)XTW {#imm}]
3837 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3838 [Zn.S,#imm]
3839 [Zn.D,#imm]
3840 [Zn.S{, Xm}]
3841 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3842 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3843 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3844
3845 (As a convenience, the notation "=immediate" is permitted in conjunction
3846 with the pc-relative literal load instructions to automatically place an
3847 immediate value or symbolic address in a nearby literal pool and generate
3848 a hidden label which references it.)
3849
3850 Upon a successful parsing, the address structure in *OPERAND will be
3851 filled in the following way:
3852
3853 .base_regno = <base>
3854 .offset.is_reg // 1 if the offset is a register
3855 .offset.imm = <imm>
3856 .offset.regno = <Rm>
3857
3858 For different addressing modes defined in the A64 ISA:
3859
3860 Offset
3861 .pcrel=0; .preind=1; .postind=0; .writeback=0
3862 Pre-indexed
3863 .pcrel=0; .preind=1; .postind=0; .writeback=1
3864 Post-indexed
3865 .pcrel=0; .preind=0; .postind=1; .writeback=1
3866 PC-relative (literal)
3867 .pcrel=1; .preind=1; .postind=0; .writeback=0
3868
3869 The shift/extension information, if any, will be stored in .shifter.
3870 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3871 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3872 corresponding register.
3873
3874 BASE_TYPE says which types of base register should be accepted and
3875 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3876 is the type of shifter that is allowed for immediate offsets,
3877 or SHIFTED_NONE if none.
3878
3879 In all other respects, it is the caller's responsibility to check
3880 for addressing modes not supported by the instruction, and to set
3881 inst.reloc.type. */
3882
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;

  /* No opening '[': the operand is a PC-relative literal or an
     "=immediate" literal-pool request.  */
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol> */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* Pick the reloc variant appropriate to the instruction.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>: */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr> */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Remember whether the base looked like a register name at all, to
     choose between the "invalid" and "expected" diagnostics below.  */
  bool alpha_base_p = ISALPHA (*p);
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      if (reg
	  && aarch64_check_reg_type (reg, REG_TYPE_R_SP)
	  && *base_qualifier == AARCH64_OPND_QLF_W)
	set_syntax_error (_("expected a 64-bit base register"));
      else if (alpha_base_p)
	set_syntax_error (_("invalid base register"));
      else
	set_syntax_error (_("expected a base register"));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}] */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifters require a 64-bit (or matching-size)
		 offset register.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW requires a 32-bit offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!  */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!  */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is shorthand for [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
4189
4190 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4191 on success. */
4192 static bool
4193 parse_address (char **str, aarch64_opnd_info *operand)
4194 {
4195 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4196 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4197 REG_TYPE_R64_SP, REG_TYPE_R_ZR, SHIFTED_NONE);
4198 }
4199
/* Parse an address in which SVE vector registers and MUL VL are allowed.
   The arguments have the same meaning as for parse_address_main.
   Return TRUE on success.

   The SVE register classes admit the Z-register addressing forms shown
   in the comment above parse_address_main, and immediate offsets may
   carry a trailing "MUL VL" multiplier.  */
static bool
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
4212
4213 /* Parse a register X0-X30. The register must be 64-bit and register 31
4214 is unallocated. */
4215 static bool
4216 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4217 {
4218 const reg_entry *reg = parse_reg (str);
4219 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4220 {
4221 set_expected_reg_error (REG_TYPE_R_64, reg, 0);
4222 return false;
4223 }
4224 operand->reg.regno = reg->number;
4225 operand->qualifier = AARCH64_OPND_QLF_X;
4226 return true;
4227 }
4228
/* Parse an operand for a MOVZ, MOVN or MOVK instruction.

   *INTERNAL_FIXUP_P is set to 1 when the operand carries no explicit
   relocation modifier (so the value is handled via an internal fixup),
   and to 0 when a ":rello:" modifier selects a MOVW relocation, which
   is then recorded in inst.reloc.type.

   Return TRUE on success; otherwise return FALSE.  */
static bool
parse_half (char **str, int *internal_fixup_p)
{
  char *p = *str;

  /* The immediate prefix is optional.  */
  skip_past_char (&p, '#');

  gas_assert (internal_fixup_p);
  *internal_fixup_p = 0;

  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;

      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->movw_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      inst.reloc.type = entry->movw_type;
    }
  else
    *internal_fixup_p = 1;

  /* Parse the value (or symbolic expression) into inst.reloc.exp.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
    return false;

  *str = p;
  return true;
}
4272
4273 /* Parse an operand for an ADRP instruction:
4274 ADRP <Xd>, <label>
4275 Return TRUE on success; otherwise return FALSE. */
4276
4277 static bool
4278 parse_adrp (char **str)
4279 {
4280 char *p;
4281
4282 p = *str;
4283 if (*p == ':')
4284 {
4285 struct reloc_table_entry *entry;
4286
4287 /* Try to parse a relocation. Anything else is an error. */
4288 ++p;
4289 if (!(entry = find_reloc_table_entry (&p)))
4290 {
4291 set_syntax_error (_("unknown relocation modifier"));
4292 return false;
4293 }
4294
4295 if (entry->adrp_type == 0)
4296 {
4297 set_syntax_error
4298 (_("this relocation modifier is not allowed on this instruction"));
4299 return false;
4300 }
4301
4302 inst.reloc.type = entry->adrp_type;
4303 }
4304 else
4305 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4306
4307 inst.reloc.pc_rel = 1;
4308 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4309 return false;
4310 *str = p;
4311 return true;
4312 }
4313
4314 /* Miscellaneous. */
4315
4316 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4317 of SIZE tokens in which index I gives the token for field value I,
4318 or is null if field value I is invalid. If the symbolic operand
4319 can also be given as a 0-based integer, REG_TYPE says which register
4320 names should be treated as registers rather than as symbolic immediates
4321 while parsing that integer. REG_TYPE is REG_TYPE_MAX otherwise.
4322
4323 Return true on success, moving *STR past the operand and storing the
4324 field value in *VAL. */
4325
4326 static int
4327 parse_enum_string (char **str, int64_t *val, const char *const *array,
4328 size_t size, aarch64_reg_type reg_type)
4329 {
4330 expressionS exp;
4331 char *p, *q;
4332 size_t i;
4333
4334 /* Match C-like tokens. */
4335 p = q = *str;
4336 while (ISALNUM (*q))
4337 q++;
4338
4339 for (i = 0; i < size; ++i)
4340 if (array[i]
4341 && strncasecmp (array[i], p, q - p) == 0
4342 && array[i][q - p] == 0)
4343 {
4344 *val = i;
4345 *str = q;
4346 return true;
4347 }
4348
4349 if (reg_type == REG_TYPE_MAX)
4350 return false;
4351
4352 if (!parse_immediate_expression (&p, &exp, reg_type))
4353 return false;
4354
4355 if (exp.X_op == O_constant
4356 && (uint64_t) exp.X_add_number < size)
4357 {
4358 *val = exp.X_add_number;
4359 *str = p;
4360 return true;
4361 }
4362
4363 /* Use the default error for this operand. */
4364 return false;
4365 }
4366
4367 /* Parse an option for a preload instruction. Returns the encoding for the
4368 option, or PARSE_FAIL. */
4369
4370 static int
4371 parse_pldop (char **str)
4372 {
4373 char *p, *q;
4374 const struct aarch64_name_value_pair *o;
4375
4376 p = q = *str;
4377 while (ISALNUM (*q))
4378 q++;
4379
4380 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4381 if (!o)
4382 return PARSE_FAIL;
4383
4384 *str = q;
4385 return o->value;
4386 }
4387
4388 /* Parse an option for a barrier instruction. Returns the encoding for the
4389 option, or PARSE_FAIL. */
4390
4391 static int
4392 parse_barrier (char **str)
4393 {
4394 char *p, *q;
4395 const struct aarch64_name_value_pair *o;
4396
4397 p = q = *str;
4398 while (ISALPHA (*q))
4399 q++;
4400
4401 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4402 if (!o)
4403 return PARSE_FAIL;
4404
4405 *str = q;
4406 return o->value;
4407 }
4408
4409 /* Parse an option for barrier, bti and guarded control stack data
4410 synchronization instructions. Return true on matching the target
4411 options else return false. */
4412
4413 static bool
4414 parse_hint_opt (const char *name, char **str,
4415 const struct aarch64_name_value_pair ** hint_opt)
4416 {
4417 char *p, *q;
4418 const struct aarch64_name_value_pair *o;
4419
4420 p = q = *str;
4421 while (ISALPHA (*q))
4422 q++;
4423
4424 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4425 if (!o)
4426 return false;
4427
4428 if ((strcmp ("gcsb", name) == 0 && o->value != HINT_OPD_DSYNC)
4429 || ((strcmp ("psb", name) == 0 || strcmp ("tsb", name) == 0)
4430 && o->value != HINT_OPD_CSYNC)
4431 || ((strcmp ("bti", name) == 0)
4432 && (o->value != HINT_OPD_C && o->value != HINT_OPD_J
4433 && o->value != HINT_OPD_JC)))
4434 return false;
4435
4436 *str = q;
4437 *hint_opt = o;
4438 return true;
4439 }
4440
4441 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4442 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4443 on failure. Format:
4444
4445 REG_TYPE.QUALIFIER
4446
4447 Side effect: Update STR with current parse position of success.
4448
4449 FLAGS is as for parse_typed_reg. */
4450
4451 static const reg_entry *
4452 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4453 aarch64_opnd_qualifier_t *qualifier, unsigned int flags)
4454 {
4455 struct vector_type_el vectype;
4456 const reg_entry *reg = parse_typed_reg (str, reg_type, &vectype,
4457 PTR_FULL_REG | flags);
4458 if (!reg)
4459 return NULL;
4460
4461 if (vectype.type == NT_invtype)
4462 *qualifier = AARCH64_OPND_QLF_NIL;
4463 else
4464 {
4465 *qualifier = vectype_to_qualifier (&vectype);
4466 if (*qualifier == AARCH64_OPND_QLF_NIL)
4467 return NULL;
4468 }
4469
4470 return reg;
4471 }
4472
4473 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4474
4475 #<imm>
4476 <imm>
4477
4478 Function return TRUE if immediate was found, or FALSE.
4479 */
4480 static bool
4481 parse_sme_immediate (char **str, int64_t *imm)
4482 {
4483 int64_t val;
4484 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4485 return false;
4486
4487 *imm = val;
4488 return true;
4489 }
4490
/* Parse index with selection register and immediate offset:

      [<Wv>, <imm>]
      [<Wv>, #<imm>]
      [<Ws>, <offsf>:<offsl>]

   Return true on success, populating OPND with the parsed index.
   On failure, a syntax error is recorded and false is returned.  */

static bool
parse_sme_za_index (char **str, struct aarch64_indexed_za *opnd)
{
  const reg_entry *reg;

  if (!skip_past_char (str, '['))
    {
      set_syntax_error (_("expected '['"));
      return false;
    }

  /* The selection register, encoded in the 2-bit Rv field.  */
  reg = parse_reg (str);
  if (reg == NULL || reg->type != REG_TYPE_R_32)
    {
      set_syntax_error (_("expected a 32-bit selection register"));
      return false;
    }
  opnd->index.regno = reg->number;

  if (!skip_past_char (str, ','))
    {
      set_syntax_error (_("missing immediate offset"));
      return false;
    }

  /* The first (or only) immediate offset.  */
  if (!parse_sme_immediate (str, &opnd->index.imm))
    {
      set_syntax_error (_("expected a constant immediate offset"));
      return false;
    }

  /* Optional <offsf>:<offsl> range; the second immediate must be
     strictly greater than the first.  */
  if (skip_past_char (str, ':'))
    {
      int64_t end;
      if (!parse_sme_immediate (str, &end))
	{
	  set_syntax_error (_("expected a constant immediate offset"));
	  return false;
	}
      if (end < opnd->index.imm)
	{
	  set_syntax_error (_("the last offset is less than the"
			      " first offset"));
	  return false;
	}
      if (end == opnd->index.imm)
	{
	  set_syntax_error (_("the last offset is equal to the"
			      " first offset"));
	  return false;
	}
      /* countm1 holds the range length minus one.  */
      opnd->index.countm1 = (uint64_t) end - opnd->index.imm;
    }

  /* Optional vector group size suffix, ", vgx2" or ", vgx4".  */
  opnd->group_size = 0;
  if (skip_past_char (str, ','))
    {
      if (strncasecmp (*str, "vgx2", 4) == 0 && !ISALPHA ((*str)[4]))
	{
	  *str += 4;
	  opnd->group_size = 2;
	}
      else if (strncasecmp (*str, "vgx4", 4) == 0 && !ISALPHA ((*str)[4]))
	{
	  *str += 4;
	  opnd->group_size = 4;
	}
      else
	{
	  set_syntax_error (_("invalid vector group size"));
	  return false;
	}
    }

  if (!skip_past_char (str, ']'))
    {
      set_syntax_error (_("expected ']'"));
      return false;
    }

  return true;
}
4582
4583 /* Parse a register of type REG_TYPE that might have an element type
4584 qualifier and that is indexed by two values: a 32-bit register,
4585 followed by an immediate. The ranges of the register and the
4586 immediate vary by opcode and are checked in libopcodes.
4587
4588 Return true on success, populating OPND with information about
4589 the operand and setting QUALIFIER to the register qualifier.
4590
4591 Field format examples:
4592
4593 <Pm>.<T>[<Wv>< #<imm>]
4594 ZA[<Wv>, #<imm>]
4595 <ZAn><HV>.<T>[<Wv>, #<imm>]
4596 <ZAn><HV>.<T>[<Ws>, <offsf>:<offsl>]
4597
4598 FLAGS is as for parse_typed_reg. */
4599
4600 static bool
4601 parse_dual_indexed_reg (char **str, aarch64_reg_type reg_type,
4602 struct aarch64_indexed_za *opnd,
4603 aarch64_opnd_qualifier_t *qualifier,
4604 unsigned int flags)
4605 {
4606 const reg_entry *reg = parse_reg_with_qual (str, reg_type, qualifier, flags);
4607 if (!reg)
4608 return false;
4609
4610 opnd->v = aarch64_check_reg_type (reg, REG_TYPE_ZATV);
4611 opnd->regno = reg->number;
4612
4613 return parse_sme_za_index (str, opnd);
4614 }
4615
4616 /* Like parse_sme_za_hv_tiles_operand, but expect braces around the
4617 operand. */
4618
4619 static bool
4620 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4621 struct aarch64_indexed_za *opnd,
4622 aarch64_opnd_qualifier_t *qualifier)
4623 {
4624 if (!skip_past_char (str, '{'))
4625 {
4626 set_expected_reglist_error (REG_TYPE_ZATHV, parse_reg (str));
4627 return false;
4628 }
4629
4630 if (!parse_dual_indexed_reg (str, REG_TYPE_ZATHV, opnd, qualifier,
4631 PTR_IN_REGLIST))
4632 return false;
4633
4634 if (!skip_past_char (str, '}'))
4635 {
4636 set_syntax_error (_("expected '}'"));
4637 return false;
4638 }
4639
4640 return true;
4641 }
4642
4643 /* Parse list of up to eight 64-bit element tile names separated by commas in
4644 SME's ZERO instruction:
4645
4646 ZERO { <mask> }
4647
4648 Function returns <mask>:
4649
4650 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4651 */
4652 static int
4653 parse_sme_zero_mask(char **str)
4654 {
4655 char *q;
4656 int mask;
4657 aarch64_opnd_qualifier_t qualifier;
4658 unsigned int ptr_flags = PTR_IN_REGLIST;
4659
4660 mask = 0x00;
4661 q = *str;
4662 do
4663 {
4664 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA_ZAT,
4665 &qualifier, ptr_flags);
4666 if (!reg)
4667 return PARSE_FAIL;
4668
4669 if (reg->type == REG_TYPE_ZA)
4670 {
4671 if (qualifier != AARCH64_OPND_QLF_NIL)
4672 {
4673 set_syntax_error ("ZA should not have a size suffix");
4674 return PARSE_FAIL;
4675 }
4676 /* { ZA } is assembled as all-ones immediate. */
4677 mask = 0xff;
4678 }
4679 else
4680 {
4681 int regno = reg->number;
4682 if (qualifier == AARCH64_OPND_QLF_S_B)
4683 {
4684 /* { ZA0.B } is assembled as all-ones immediate. */
4685 mask = 0xff;
4686 }
4687 else if (qualifier == AARCH64_OPND_QLF_S_H)
4688 mask |= 0x55 << regno;
4689 else if (qualifier == AARCH64_OPND_QLF_S_S)
4690 mask |= 0x11 << regno;
4691 else if (qualifier == AARCH64_OPND_QLF_S_D)
4692 mask |= 0x01 << regno;
4693 else if (qualifier == AARCH64_OPND_QLF_S_Q)
4694 {
4695 set_syntax_error (_("ZA tile masks do not operate at .Q"
4696 " granularity"));
4697 return PARSE_FAIL;
4698 }
4699 else if (qualifier == AARCH64_OPND_QLF_NIL)
4700 {
4701 set_syntax_error (_("missing ZA tile size"));
4702 return PARSE_FAIL;
4703 }
4704 else
4705 {
4706 set_syntax_error (_("invalid ZA tile"));
4707 return PARSE_FAIL;
4708 }
4709 }
4710 ptr_flags |= PTR_GOOD_MATCH;
4711 }
4712 while (skip_past_char (&q, ','));
4713
4714 *str = q;
4715 return mask;
4716 }
4717
4718 /* Wraps in curly braces <mask> operand ZERO instruction:
4719
4720 ZERO { <mask> }
4721
4722 Function returns value of <mask> bit-field.
4723 */
4724 static int
4725 parse_sme_list_of_64bit_tiles (char **str)
4726 {
4727 int regno;
4728
4729 if (!skip_past_char (str, '{'))
4730 {
4731 set_syntax_error (_("expected '{'"));
4732 return PARSE_FAIL;
4733 }
4734
4735 /* Empty <mask> list is an all-zeros immediate. */
4736 if (!skip_past_char (str, '}'))
4737 {
4738 regno = parse_sme_zero_mask (str);
4739 if (regno == PARSE_FAIL)
4740 return PARSE_FAIL;
4741
4742 if (!skip_past_char (str, '}'))
4743 {
4744 set_syntax_error (_("expected '}'"));
4745 return PARSE_FAIL;
4746 }
4747 }
4748 else
4749 regno = 0x00;
4750
4751 return regno;
4752 }
4753
4754 /* Parse streaming mode operand for SMSTART and SMSTOP.
4755
4756 {SM | ZA}
4757
4758 Function returns 's' if SM or 'z' if ZM is parsed. Otherwise PARSE_FAIL.
4759 */
4760 static int
4761 parse_sme_sm_za (char **str)
4762 {
4763 char *p, *q;
4764
4765 p = q = *str;
4766 while (ISALPHA (*q))
4767 q++;
4768
4769 if ((q - p != 2)
4770 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4771 {
4772 set_syntax_error (_("expected SM or ZA operand"));
4773 return PARSE_FAIL;
4774 }
4775
4776 *str = q;
4777 return TOLOWER (p[0]);
4778 }
4779
4780 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4781 Returns the encoding for the option, or PARSE_FAIL.
4782
4783 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4784 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4785
4786 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4787 field, otherwise as a system register.
4788 */
4789
4790 static int
4791 parse_sys_reg (char **str, htab_t sys_regs,
4792 int imple_defined_p, int pstatefield_p,
4793 uint32_t* flags, bool sysreg128_p)
4794 {
4795 char *p, *q;
4796 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4797 const aarch64_sys_reg *o;
4798 int value;
4799
4800 p = buf;
4801 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4802 if (p < buf + (sizeof (buf) - 1))
4803 *p++ = TOLOWER (*q);
4804 *p = '\0';
4805
4806 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4807 valid system register. This is enforced by construction of the hash
4808 table. */
4809 if (p - buf != q - *str)
4810 return PARSE_FAIL;
4811
4812 o = str_hash_find (sys_regs, buf);
4813 if (!o)
4814 {
4815 if (!imple_defined_p)
4816 return PARSE_FAIL;
4817 else
4818 {
4819 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
4820 unsigned int op0, op1, cn, cm, op2;
4821
4822 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
4823 != 5)
4824 return PARSE_FAIL;
4825 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
4826 return PARSE_FAIL;
4827 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
4828 if (flags)
4829 *flags = 0;
4830 }
4831 }
4832 else
4833 {
4834 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
4835 as_bad (_("selected processor does not support PSTATE field "
4836 "name '%s'"), buf);
4837 if (!pstatefield_p
4838 && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
4839 o->value, o->flags,
4840 &o->features))
4841 as_bad (_("selected processor does not support system register "
4842 "name '%s'"), buf);
4843 if (sysreg128_p && !aarch64_sys_reg_128bit_p (o->flags))
4844 as_bad (_("128-bit-wide accsess not allowed on selected system"
4845 " register '%s'"), buf);
4846 if (aarch64_sys_reg_deprecated_p (o->flags))
4847 as_warn (_("system register name '%s' is deprecated and may be "
4848 "removed in a future release"), buf);
4849 value = o->value;
4850 if (flags)
4851 *flags = o->flags;
4852 }
4853
4854 *str = q;
4855 return value;
4856 }
4857
4858 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4859 for the option, or NULL. */
4860
4861 static const aarch64_sys_ins_reg *
4862 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4863 {
4864 char *p, *q;
4865 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4866 const aarch64_sys_ins_reg *o;
4867
4868 p = buf;
4869 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4870 if (p < buf + (sizeof (buf) - 1))
4871 *p++ = TOLOWER (*q);
4872 *p = '\0';
4873
4874 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4875 valid system register. This is enforced by construction of the hash
4876 table. */
4877 if (p - buf != q - *str)
4878 return NULL;
4879
4880 o = str_hash_find (sys_ins_regs, buf);
4881 if (!o)
4882 return NULL;
4883
4884 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4885 o->name, o->value, o->flags, 0))
4886 as_bad (_("selected processor does not support system register "
4887 "name '%s'"), buf);
4888 if (aarch64_sys_reg_deprecated_p (o->flags))
4889 as_warn (_("system register name '%s' is deprecated and may be "
4890 "removed in a future release"), buf);
4891
4892 *str = q;
4893 return o;
4894 }
4895 \f
/* Parsing helpers used by the operand parser.  Each macro reads one
   syntactic element via the local variable `str' and jumps to the local
   label `failure' on error; several also rely on locals `reg', `val',
   `info' and `imm_reg_type' from the enclosing function.  */

/* Consume the single character CHR or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into `reg' or fail.  */
#define po_reg_or_fail(regtype) do {				\
    reg = aarch64_reg_parse (&str, regtype, NULL);		\
    if (!reg)							\
      goto failure;						\
  } while (0)

/* Parse an integer or FP register of type REG_TYPE, recording its number
   and inherent qualifier in `info', or fail with a suitable error.  */
#define po_int_fp_reg_or_fail(reg_type) do {			\
    reg = parse_reg (&str);					\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_expected_reg_error (reg_type, reg, 0);		\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = inherent_reg_qualifier (reg);		\
  } while (0)

/* Parse a constant immediate into `val' with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into `val' and require MIN <= val <= MAX,
   or fail.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse one of the enumerated strings in ARRAY into `val', also
   accepting an immediate expression, or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Like po_enum_or_fail, but accept only the enumerated strings (no
   immediate fallback; REG_TYPE_MAX disables it).  */
#define po_strict_enum_or_fail(array) do {			\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), REG_TYPE_MAX))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR and fail when it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4950 \f
/* A primitive log calculator: return floor(log2(N)) for N > 1, and 0
   for N <= 1.  */

static inline unsigned int
get_log2 (unsigned int n)
{
  unsigned int log = 0;

  for (; n > 1; n >>= 1)
    log++;

  return log;
}
4964
/* Encode the 12-bit imm field of an add/sub immediate instruction;
   the field occupies bits [21:10].  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  uint32_t encoded = imm << 10;
  return encoded;
}
4971
/* Encode the shift amount field of an add/sub immediate instruction;
   the field occupies bits starting at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  uint32_t encoded = cnt << 22;
  return encoded;
}
4978
4979
/* Encode the imm field of an ADR instruction: the low two bits (immlo)
   go to [30:29] and bits [20:2] (immhi) go to [23:5].  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;		  /* [1:0]  -> [30:29].  */
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;	  /* [20:2] -> [23:5].  */
  return immlo | immhi;
}
4987
/* Encode the 16-bit immediate field of a move wide instruction; the
   field starts at bit 5.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  uint32_t encoded = imm << 5;
  return encoded;
}
4994
/* Encode the 26-bit offset of an unconditional branch; the offset is
   masked to 26 bits and placed at bit 0.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t mask = (1u << 26) - 1;
  return ofs & mask;
}
5001
/* Encode the 19-bit offset of a conditional branch or compare & branch;
   the offset is masked to 19 bits and placed at bit 5.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t mask = (1u << 19) - 1;
  return (ofs & mask) << 5;
}
5008
/* Encode the 19-bit offset of a load-literal instruction; the offset is
   masked to 19 bits and placed at bit 5.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t mask = (1u << 19) - 1;
  return (ofs & mask) << 5;
}
5015
/* Encode the 14-bit offset of a test & branch instruction; the offset
   is masked to 14 bits and placed at bit 5.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t mask = (1u << 14) - 1;
  return (ofs & mask) << 5;
}
5022
/* Encode the 16-bit imm field of svc/hvc/smc; the field starts at
   bit 5.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  uint32_t encoded = imm << 5;
  return encoded;
}
5029
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling bit 30
   of the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ (1u << 30);
}
5036
/* Reencode a MOVZ/MOVN-style opcode as MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | (1u << 30);
}
5042
/* Reencode a MOVZ/MOVN-style opcode as MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(1u << 30);
}
5048
5049 /* Overall per-instruction processing. */
5050
5051 /* We need to be able to fix up arbitrary expressions in some statements.
5052 This is so that we can handle symbols that are an arbitrary distance from
5053 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
5054 which returns part of an address in a form which will be valid for
5055 a data instruction. We do this by pushing the expression into a symbol
5056 in the expr_section, and creating a fix for that. */
5057
/* Create a fixup for the SIZE bytes at FRAG + WHERE, resolving EXP with
   relocation RELOC (PC-relative when PC_REL is non-zero).  Expression
   forms that fix_new_exp handles directly are passed through; anything
   else is first pushed into an expression symbol so the fixup refers to
   a single symbol.  Returns the new fixup.  */

static fixS *
fix_new_aarch64 (fragS * frag,
		 int where,
		 short int size,
		 expressionS * exp,
		 int pc_rel,
		 int reloc)
{
  fixS *new_fix;

  switch (exp->X_op)
    {
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      /* Simple forms can be handed to fix_new_exp as-is.  */
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
      break;

    default:
      /* Wrap an arbitrary expression in a symbol in the expr_section and
	 fix against that instead.  */
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
			 pc_rel, reloc);
      break;
    }
  return new_fix;
}
5084 \f
5085 /* Diagnostics on operands errors. */
5086
5087 /* By default, output verbose error message.
5088 Disable the verbose error message by -mno-verbose-error. */
5089 static int verbose_error_p = 1;
5090
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Indexed by
   enum aarch64_operand_error_kind; the entries must stay in the same
   order as that enum.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_INVALID_VG_SIZE",
  "AARCH64_OPDE_REG_LIST_LENGTH",
  "AARCH64_OPDE_REG_LIST_STRIDE",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_OTHER_ERROR",
  "AARCH64_OPDE_INVALID_REGNO",
};
#endif /* DEBUG_AARCH64 */
5113
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enum values being declared in
     increasing order of severity; the asserts document (and verify) the
     expected partial ordering.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VG_SIZE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_REG_LIST_LENGTH > AARCH64_OPDE_INVALID_VG_SIZE);
  gas_assert (AARCH64_OPDE_REG_LIST_STRIDE > AARCH64_OPDE_REG_LIST_LENGTH);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_REG_LIST_STRIDE);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST_STRIDE);
  gas_assert (AARCH64_OPDE_INVALID_REGNO > AARCH64_OPDE_OTHER_ERROR);
  return lhs > rhs;
}
5140
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  Note the returned pointer is to a
   static buffer, so the result is only valid until the next call.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Copy up to the first 31 bytes and assume that the full name is
     included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5169
/* Reset *INSTRUCTION to a clean all-zeros state ready for assembling a
   new instruction; the relocation type is set to BFD_RELOC_UNUSED to
   mark the reloc slot as not in use.  */
static void
reset_aarch64_instruction (aarch64_instruction *instruction)
{
  memset (instruction, '\0', sizeof (aarch64_instruction));
  instruction->reloc.type = BFD_RELOC_UNUSED;
}
5176
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode template this error is
					   recorded against.  */
  aarch64_operand_error detail;		/* The error details.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of operand error records.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5208
5209 /* Initialize the data structure that stores the operand mismatch
5210 information on assembling one line of the assembly code. */
5211 static void
5212 init_operand_error_report (void)
5213 {
5214 if (operand_error_report.head != NULL)
5215 {
5216 gas_assert (operand_error_report.tail != NULL);
5217 operand_error_report.tail->next = free_opnd_error_record_nodes;
5218 free_opnd_error_record_nodes = operand_error_report.head;
5219 operand_error_report.head = NULL;
5220 operand_error_report.tail = NULL;
5221 return;
5222 }
5223 gas_assert (operand_error_report.tail == NULL);
5224 }
5225
5226 /* Return TRUE if some operand error has been recorded during the
5227 parsing of the current assembly line using the opcode *OPCODE;
5228 otherwise return FALSE. */
5229 static inline bool
5230 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5231 {
5232 operand_error_record *record = operand_error_report.head;
5233 return record && record->opcode == opcode;
5234 }
5235
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* RECORD starts as the head of the report; when a record for OPCODE
     already exists it IS the head (see opcode_has_operand_error_p).  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Overwrite (or initialize) the head record's details.  */
  record->detail = new_record->detail;
}
5287
5288 static inline void
5289 record_operand_error_info (const aarch64_opcode *opcode,
5290 aarch64_operand_error *error_info)
5291 {
5292 operand_error_record record;
5293 record.opcode = opcode;
5294 record.detail = *error_info;
5295 add_operand_error_record (&record);
5296 }
5297
5298 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5299 error message *ERROR, for operand IDX (count from 0). */
5300
5301 static void
5302 record_operand_error (const aarch64_opcode *opcode, int idx,
5303 enum aarch64_operand_error_kind kind,
5304 const char* error)
5305 {
5306 aarch64_operand_error info;
5307 memset(&info, 0, sizeof (info));
5308 info.index = idx;
5309 info.kind = kind;
5310 info.error = error;
5311 info.non_fatal = false;
5312 record_operand_error_info (opcode, &info);
5313 }
5314
5315 static void
5316 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5317 enum aarch64_operand_error_kind kind,
5318 const char* error, const int *extra_data)
5319 {
5320 aarch64_operand_error info;
5321 info.index = idx;
5322 info.kind = kind;
5323 info.error = error;
5324 info.data[0].i = extra_data[0];
5325 info.data[1].i = extra_data[1];
5326 info.data[2].i = extra_data[2];
5327 info.non_fatal = false;
5328 record_operand_error_info (opcode, &info);
5329 }
5330
5331 static void
5332 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5333 const char* error, int lower_bound,
5334 int upper_bound)
5335 {
5336 int data[3] = {lower_bound, upper_bound, 0};
5337 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5338 error, data);
5339 }
5340
5341 /* Remove the operand error record for *OPCODE. */
5342 static void ATTRIBUTE_UNUSED
5343 remove_operand_error_record (const aarch64_opcode *opcode)
5344 {
5345 if (opcode_has_operand_error_p (opcode))
5346 {
5347 operand_error_record* record = operand_error_report.head;
5348 gas_assert (record != NULL && operand_error_report.tail != NULL);
5349 operand_error_report.head = record->next;
5350 record->next = free_opnd_error_record_nodes;
5351 free_opnd_error_record_nodes = record;
5352 if (operand_error_report.head == NULL)
5353 {
5354 gas_assert (operand_error_report.tail == record);
5355 operand_error_report.tail = NULL;
5356 }
5357 }
5358 }
5359
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.
	 An empty sequence marks the end of the valid entries.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers agree with this pattern.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Strictly-greater comparison keeps the first of equally good
	 matches.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5409
5410 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5411 corresponding operands in *INSTR. */
5412
5413 static inline void
5414 assign_qualifier_sequence (aarch64_inst *instr,
5415 const aarch64_opnd_qualifier_t *qualifiers)
5416 {
5417 int i = 0;
5418 int num_opnds = aarch64_num_of_operands (instr->opcode);
5419 gas_assert (num_opnds);
5420 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5421 instr->operands[i].qualifier = *qualifiers;
5422 }
5423
5424 /* Callback used by aarch64_print_operand to apply STYLE to the
5425 disassembler output created from FMT and ARGS. The STYLER object holds
5426 any required state. Must return a pointer to a string (created from FMT
5427 and ARGS) that will continue to be valid until the complete disassembled
5428 instruction has been printed.
5429
5430 We don't currently add any styling to the output of the disassembler as
5431 used within assembler error messages, and so STYLE is ignored here. A
5432 new string is allocated on the obstack help within STYLER and returned
5433 to the caller. */
5434
static const char *aarch64_apply_style
	(struct aarch64_styler *styler,
	 enum disassembler_style style ATTRIBUTE_UNUSED,
	 const char *fmt, va_list args)
{
  int res;
  char *ptr;
  struct obstack *stack = (struct obstack *) styler->state;
  va_list ap;

  /* Calculate the required space.  Work on a copy of ARGS because a
     va_list may only be traversed once.  */
  va_copy (ap, args);
  res = vsnprintf (NULL, 0, fmt, ap);
  va_end (ap);
  gas_assert (res >= 0);

  /* Allocate space on the obstack and format the result.  The buffer
     lives as long as the obstack does, satisfying the lifetime contract
     described above.  */
  ptr = (char *) obstack_alloc (stack, res + 1);
  res = vsnprintf (ptr, (res + 1), fmt, args);
  gas_assert (res >= 0);

  return ptr;
}
5458
/* Print operands for the diagnosis purpose.  Append the textual form of
   the operands in OPNDS (interpreted against OPCODE) to BUF, separated
   by commas, with any trailing comment appended after "\t// ".  BUF must
   be large enough for the result.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Style callback allocates its strings on CONTENT.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  /* Release all strings the styler allocated.  */
  obstack_free (&content, NULL);
}
5510
/* Send to stderr a string as information, prefixed with the current
   file name and line number (when known) and "Info: ".  FORMAT and the
   following arguments are printf-style.  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  file = as_where (&line);
  if (file)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
5534
5535 /* See if the AARCH64_OPDE_SYNTAX_ERROR error described by DETAIL
5536 relates to registers or register lists. If so, return a string that
5537 reports the error against "operand %d", otherwise return null. */
5538
5539 static const char *
5540 get_reg_error_message (const aarch64_operand_error *detail)
5541 {
5542 /* Handle the case where we found a register that was expected
5543 to be in a register list outside of a register list. */
5544 if ((detail->data[1].i & detail->data[2].i) != 0
5545 && (detail->data[1].i & SEF_IN_REGLIST) == 0)
5546 return _("missing braces at operand %d");
5547
5548 /* If some opcodes expected a register, and we found a register,
5549 complain about the difference. */
5550 if (detail->data[2].i)
5551 {
5552 unsigned int expected = (detail->data[1].i & SEF_IN_REGLIST
5553 ? detail->data[1].i & ~SEF_IN_REGLIST
5554 : detail->data[0].i & ~SEF_DEFAULT_ERROR);
5555 const char *msg = get_reg_expected_msg (expected, detail->data[2].i);
5556 if (!msg)
5557 msg = N_("unexpected register type at operand %d");
5558 return msg;
5559 }
5560
5561 /* Handle the case where we got to the point of trying to parse a
5562 register within a register list, but didn't find a known register. */
5563 if (detail->data[1].i & SEF_IN_REGLIST)
5564 {
5565 unsigned int expected = detail->data[1].i & ~SEF_IN_REGLIST;
5566 const char *msg = get_reg_expected_msg (expected, 0);
5567 if (!msg)
5568 msg = _("invalid register list at operand %d");
5569 return msg;
5570 }
5571
5572 /* Punt if register-related problems weren't the only errors. */
5573 if (detail->data[0].i & SEF_DEFAULT_ERROR)
5574 return NULL;
5575
5576 /* Handle the case where the only acceptable things are registers. */
5577 if (detail->data[1].i == 0)
5578 {
5579 const char *msg = get_reg_expected_msg (detail->data[0].i, 0);
5580 if (!msg)
5581 msg = _("expected a register at operand %d");
5582 return msg;
5583 }
5584
5585 /* Handle the case where the only acceptable things are register lists,
5586 and there was no opening '{'. */
5587 if (detail->data[0].i == 0)
5588 return _("expected '{' at operand %d");
5589
5590 return _("expected a register or register list at operand %d");
5591 }
5592
5593 /* Output one operand error record. */
5594
static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* The operand type the error is reported against, if any.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  /* Non-fatal problems are issued as warnings, everything else as
     errors.  */
  handler_t handler = detail->non_fatal ? as_warn : as_bad;
  const char *msg = detail->error;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
      /* Prefer a register-specific diagnosis when the problem can be
	 described in terms of register kinds or register lists.  */
      if (!msg && idx >= 0)
	{
	  msg = get_reg_error_message (detail);
	  if (msg)
	    {
	      char *full_msg = xasprintf (msg, idx + 1);
	      handler (_("%s -- `%s'"), full_msg, str);
	      free (full_msg);
	      break;
	    }
	}
      /* Fall through.  */

    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (msg != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), msg, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     msg, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parsing must succeed: this record was created
	     for this same opcode/operand text.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail (we got here because the variant
	     is invalid), hence the inverted assertion.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_(" did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_(" %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_(" other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_(" %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_INVALID_REGNO:
      /* data[0].s is the register prefix; data[1].i/data[2].i are the
	 first and last acceptable register numbers.  */
      handler (_("%s%d-%s%d expected at operand %d -- `%s'"),
	       detail->data[0].s, detail->data[1].i,
	       detail->data[0].s, detail->data[2].i, idx + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* When the lower and upper bounds coincide there is only one
	 acceptable value; phrase the message accordingly.  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 msg ? msg : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 msg ? msg : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_INVALID_VG_SIZE:
      /* data[0].i == 0 means no vector group size was acceptable.  */
      if (detail->data[0].i == 0)
	handler (_("unexpected vector group size at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("operand %d must have a vector group size of %d -- `%s'"),
		 idx + 1, detail->data[0].i, str);
      break;

    case AARCH64_OPDE_REG_LIST_LENGTH:
      /* data[0].i is a bitmask: bit N set means a list of N registers
	 would have been acceptable.  */
      if (detail->data[0].i == (1 << 1))
	handler (_("expected a single-register list at operand %d -- `%s'"),
		 idx + 1, str);
      else if ((detail->data[0].i & -detail->data[0].i) == detail->data[0].i)
	/* Exactly one bit set: a single acceptable length.  */
	handler (_("expected a list of %d registers at operand %d -- `%s'"),
		 get_log2 (detail->data[0].i), idx + 1, str);
      else if (detail->data[0].i == 0x14)
	/* 0x14 == (1 << 2) | (1 << 4): lists of 2 or 4 registers.  */
	handler (_("expected a list of %d or %d registers at"
		   " operand %d -- `%s'"),
		 2, 4, idx + 1, str);
      else
	handler (_("invalid number of registers in the list"
		   " at operand %d -- `%s'"), idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST_STRIDE:
      /* data[0].i is a bitmask of acceptable strides, analogous to the
	 length mask above.  */
      if (detail->data[0].i == (1 << 1))
	handler (_("the register list must have a stride of %d"
		   " at operand %d -- `%s'"), 1, idx + 1, str);
      else if (detail->data[0].i == 0x12 || detail->data[0].i == 0x102)
	/* 0x12: strides 1 or 4; 0x102: strides 1 or 8.  */
	handler (_("the register list must have a stride of %d or %d"
		   " at operand %d -- `%s`"), 1,
		 detail->data[0].i == 0x12 ? 4 : 8, idx + 1, str);
      else
	handler (_("invalid register stride at operand %d -- `%s'"),
		 idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5835
5836 /* Return true if the presence of error A against an instruction means
5837 that error B should not be reported. This is only used as a first pass,
5838 to pick the kind of error that we should report. */
5839
5840 static bool
5841 better_error_p (operand_error_record *a, operand_error_record *b)
5842 {
5843 /* For errors reported during parsing, prefer errors that relate to
5844 later operands, since that implies that the earlier operands were
5845 syntactically valid.
5846
5847 For example, if we see a register R instead of an immediate in
5848 operand N, we'll report that as a recoverable "immediate operand
5849 required" error. This is because there is often another opcode
5850 entry that accepts a register operand N, and any errors about R
5851 should be reported against the register forms of the instruction.
5852 But if no such register form exists, the recoverable error should
5853 still win over a syntax error against operand N-1.
5854
5855 For these purposes, count an error reported at the end of the
5856 assembly string as equivalent to an error reported against the
5857 final operand. This means that opcode entries that expect more
5858 operands win over "unexpected characters following instruction". */
5859 if (a->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR
5860 && b->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR)
5861 {
5862 int a_index = (a->detail.index < 0
5863 ? aarch64_num_of_operands (a->opcode) - 1
5864 : a->detail.index);
5865 int b_index = (b->detail.index < 0
5866 ? aarch64_num_of_operands (b->opcode) - 1
5867 : b->detail.index);
5868 if (a_index != b_index)
5869 return a_index > b_index;
5870 }
5871 return operand_error_higher_severity_p (a->detail.kind, b->detail.kind);
5872 }
5873
5874 /* Process and output the error message about the operand mismatching.
5875
5876 When this function is called, the operand error information had
5877 been collected for an assembly line and there will be multiple
5878 errors in the case of multiple instruction templates; output the
5879 error message that most closely describes the problem.
5880
5881 The errors to be printed can be filtered on printing all errors
5882 or only non-fatal errors. This distinction has to be made because
5883 the error buffer may already be filled with fatal errors we don't want to
5884 print due to the different instruction templates. */
5885
static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* First pass: find the error kind of the highest severity across all
     records (see better_error_p for the comparison rules).  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      if (curr->detail.kind == AARCH64_OPDE_SYNTAX_ERROR)
	{
	  DEBUG_TRACE ("\t%s [%x, %x, %x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i, curr->detail.data[1].i,
		       curr->detail.data[2].i);
	}
      else if (curr->detail.kind == AARCH64_OPDE_REG_LIST_LENGTH
	       || curr->detail.kind == AARCH64_OPDE_REG_LIST_STRIDE)
	{
	  DEBUG_TRACE ("\t%s [%x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i);
	}
      else
	{
	  DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
	}
      if ((!non_fatal_only || curr->detail.non_fatal)
	  && (!record || better_error_p (curr, record)))
	record = curr;
    }

  kind = (record ? record->detail.kind : AARCH64_OPDE_NIL);
  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Second pass: pick up one of errors of KIND to report.  */
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (!record || curr->detail.index > record->detail.index)
	record = curr;
      else if (curr->detail.index == record->detail.index
	       && !record->detail.error)
	{
	  if (curr->detail.error)
	    record = curr;
	  else if (kind == AARCH64_OPDE_SYNTAX_ERROR)
	    {
	      /* Merge the flag sets in place so that the final report
		 covers everything that was acceptable at this operand
		 across all opcode entries.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      record->detail.data[1].i |= curr->detail.data[1].i;
	      record->detail.data[2].i |= curr->detail.data[2].i;
	      DEBUG_TRACE ("\t--> %s [%x, %x, %x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i, curr->detail.data[1].i,
			   curr->detail.data[2].i);
	    }
	  else if (kind == AARCH64_OPDE_REG_LIST_LENGTH
		   || kind == AARCH64_OPDE_REG_LIST_STRIDE)
	    {
	      /* Likewise merge the bitmasks of acceptable list lengths
		 or strides.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      DEBUG_TRACE ("\t--> %s [%x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i);
	    }
	  /* Pick the variant with the closest match.  */
	  else if (kind == AARCH64_OPDE_INVALID_VARIANT
		   && record->detail.data[0].i > curr->detail.data[0].i)
	    record = curr;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (record);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
6009 \f
/* Store the 32-bit instruction word INSN at BUF in little-endian byte
   order, independent of the host's endianness.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
6020
/* Read a 32-bit instruction word from BUF, which holds it in
   little-endian byte order.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Fold the bytes in from the most significant end downwards.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
6030
6031 static void
6032 output_inst (struct aarch64_inst *new_inst)
6033 {
6034 char *to = NULL;
6035
6036 to = frag_more (INSN_SIZE);
6037
6038 frag_now->tc_frag_data.recorded = 1;
6039
6040 put_aarch64_insn (to, inst.base.value);
6041
6042 if (inst.reloc.type != BFD_RELOC_UNUSED)
6043 {
6044 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
6045 INSN_SIZE, &inst.reloc.exp,
6046 inst.reloc.pc_rel,
6047 inst.reloc.type);
6048 DEBUG_TRACE ("Prepared relocation fix up");
6049 /* Don't check the addend value against the instruction size,
6050 that's the job of our code in md_apply_fix(). */
6051 fixp->fx_no_overflow = 1;
6052 if (new_inst != NULL)
6053 fixp->tc_fix_data.inst = new_inst;
6054 if (aarch64_gas_internal_fixup_p ())
6055 {
6056 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
6057 fixp->tc_fix_data.opnd = inst.reloc.opnd;
6058 fixp->fx_addnumber = inst.reloc.flags;
6059 }
6060 }
6061
6062 dwarf2_emit_insn (INSN_SIZE);
6063 }
6064
6065 /* Link together opcodes of the same name. */
6066
struct templates
{
  /* One opcode entry carrying this mnemonic.  */
  const aarch64_opcode *opcode;
  /* The next opcode sharing the same mnemonic, or NULL at the end.  */
  struct templates *next;
};

typedef struct templates templates;
6074
6075 static templates *
6076 lookup_mnemonic (const char *start, int len)
6077 {
6078 templates *templ = NULL;
6079
6080 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
6081 return templ;
6082 }
6083
6084 /* Subroutine of md_assemble, responsible for looking up the primary
6085 opcode from the mnemonic the user wrote. BASE points to the beginning
6086 of the mnemonic, DOT points to the first '.' within the mnemonic
6087 (if any) and END points to the end of the mnemonic. */
6088
6089 static templates *
6090 opcode_lookup (char *base, char *dot, char *end)
6091 {
6092 const aarch64_cond *cond;
6093 char condname[16];
6094 int len;
6095
6096 if (dot == end)
6097 return 0;
6098
6099 inst.cond = COND_ALWAYS;
6100
6101 /* Handle a possible condition. */
6102 if (dot)
6103 {
6104 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
6105 if (!cond)
6106 return 0;
6107 inst.cond = cond->value;
6108 len = dot - base;
6109 }
6110 else
6111 len = end - base;
6112
6113 if (inst.cond == COND_ALWAYS)
6114 {
6115 /* Look for unaffixed mnemonic. */
6116 return lookup_mnemonic (base, len);
6117 }
6118 else if (len <= 13)
6119 {
6120 /* append ".c" to mnemonic if conditional */
6121 memcpy (condname, base, len);
6122 memcpy (condname + len, ".c", 2);
6123 base = condname;
6124 len += 2;
6125 return lookup_mnemonic (base, len);
6126 }
6127
6128 return NULL;
6129 }
6130
6131 /* Process an optional operand that is found omitted from the assembly line.
6132 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
6133 instruction's opcode entry while IDX is the index of this omitted operand.
6134 */
6135
static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* Each opcode with optional operands encodes a single default value
     in its opcode table entry; fetch it up front.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default value is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;
    /* Omitted second register of a pair: XZR pairs with XZR (0x1f),
       otherwise the pair is the register after the first one.  */
    case AARCH64_OPND_PAIRREG_OR_XZR:
      if (inst.base.operands[idx - 1].reg.regno == 0x1f)
	{
	  operand->reg.regno = 0x1f;
	  break;
	}
      operand->reg.regno = inst.base.operands[idx - 1].reg.regno + 1;
      break;
    /* Omitted second register of a pair is always the register after
       the first one.  */
    case AARCH64_OPND_PAIRREG:
      operand->reg.regno = inst.base.operands[idx - 1].reg.regno + 1;
      break;

    /* Register-lane operands: the default value is the register number.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default value is the immediate itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* An omitted "MUL #n" multiplier defaults to MUL #1.  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate needs no relocation.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* The default value indexes the barrier option table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    /* The default value indexes the hint option table.  */
    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6240
6241 /* Process the relocation type for move wide instructions.
6242 Return TRUE on success; otherwise return FALSE. */
6243
static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* 32-bit destinations cannot address the G2/G3 16-bit fragments.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK inserts 16 bits while preserving the rest of the register, so
     the signed and PC-relative group relocations below are rejected
     for it.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Map the group relocation to the implicit shift of the 16-bit
     fragment it selects: G0 -> 0, G1 -> 16, G2 -> 32, G3 -> 48.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6342
6343 /* Determine and return the real reloc type code for an instruction
6344 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
6345
static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows follow the order in which the pseudo relocation codes are
     defined (plain LO12, then the TLSLD/TLSLE variants); columns are
     indexed by log2 of the transfer size.  Only the plain LO12 row has
     a 128-bit entry.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand carries no explicit qualifier, infer it (and
     hence the access size) from operand 0's qualifier.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_log2 (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS rows above have no 128-bit entry, so cap the size there.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6430
6431 /* Check whether a register list REGINFO is valid. The registers have type
6432 REG_TYPE and must be numbered in increasing order (modulo the register
6433 bank size). They must have a consistent stride.
6434
6435 Return true if the list is valid, describing it in LIST if so. */
6436
6437 static bool
6438 reg_list_valid_p (uint32_t reginfo, struct aarch64_reglist *list,
6439 aarch64_reg_type reg_type)
6440 {
6441 uint32_t i, nb_regs, prev_regno, incr, mask;
6442 mask = reg_type_mask (reg_type);
6443
6444 nb_regs = 1 + (reginfo & 0x3);
6445 reginfo >>= 2;
6446 prev_regno = reginfo & 0x1f;
6447 incr = 1;
6448
6449 list->first_regno = prev_regno;
6450 list->num_regs = nb_regs;
6451
6452 for (i = 1; i < nb_regs; ++i)
6453 {
6454 uint32_t curr_regno, curr_incr;
6455 reginfo >>= 5;
6456 curr_regno = reginfo & 0x1f;
6457 curr_incr = (curr_regno - prev_regno) & mask;
6458 if (curr_incr == 0)
6459 return false;
6460 else if (i == 1)
6461 incr = curr_incr;
6462 else if (curr_incr != incr)
6463 return false;
6464 prev_regno = curr_regno;
6465 }
6466
6467 list->stride = incr;
6468 return true;
6469 }
6470
6471 /* Generic instruction operand parser. This does no encoding and no
6472 semantic validation; it merely squirrels values away in the inst
6473 structure. Returns TRUE or FALSE depending on whether the
6474 specified grammar matched. */
6475
6476 static bool
6477 parse_operands (char *str, const aarch64_opcode *opcode)
6478 {
6479 int i;
6480 char *backtrack_pos = 0;
6481 const enum aarch64_opnd *operands = opcode->operands;
6482 const uint64_t flags = opcode->flags;
6483 aarch64_reg_type imm_reg_type;
6484
6485 clear_error ();
6486 skip_whitespace (str);
6487
6488 if (AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SME2))
6489 imm_reg_type = REG_TYPE_R_ZR_SP_BHSDQ_VZP_PN;
6490 else if (AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE)
6491 || AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE2))
6492 imm_reg_type = REG_TYPE_R_ZR_SP_BHSDQ_VZP;
6493 else
6494 imm_reg_type = REG_TYPE_R_ZR_BHSDQ_V;
6495
6496 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6497 {
6498 int64_t val;
6499 const reg_entry *reg;
6500 int comma_skipped_p = 0;
6501 struct vector_type_el vectype;
6502 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6503 aarch64_opnd_info *info = &inst.base.operands[i];
6504 aarch64_reg_type reg_type;
6505
6506 DEBUG_TRACE ("parse operand %d", i);
6507
6508 /* Assign the operand code. */
6509 info->type = operands[i];
6510
6511 if (optional_operand_p (opcode, i))
6512 {
6513 /* Remember where we are in case we need to backtrack. */
6514 gas_assert (!backtrack_pos);
6515 backtrack_pos = str;
6516 }
6517
6518 /* Expect comma between operands; the backtrack mechanism will take
6519 care of cases of omitted optional operand. */
6520 if (i > 0 && ! skip_past_char (&str, ','))
6521 {
6522 set_syntax_error (_("comma expected between operands"));
6523 goto failure;
6524 }
6525 else
6526 comma_skipped_p = 1;
6527
6528 switch (operands[i])
6529 {
6530 case AARCH64_OPND_Rd:
6531 case AARCH64_OPND_Rn:
6532 case AARCH64_OPND_Rm:
6533 case AARCH64_OPND_Rt:
6534 case AARCH64_OPND_Rt2:
6535 case AARCH64_OPND_X16:
6536 case AARCH64_OPND_Rs:
6537 case AARCH64_OPND_Ra:
6538 case AARCH64_OPND_Rt_LS64:
6539 case AARCH64_OPND_Rt_SYS:
6540 case AARCH64_OPND_PAIRREG:
6541 case AARCH64_OPND_PAIRREG_OR_XZR:
6542 case AARCH64_OPND_SVE_Rm:
6543 po_int_fp_reg_or_fail (REG_TYPE_R_ZR);
6544
6545 /* In LS64 load/store instructions Rt register number must be even
6546 and <=22. */
6547 if (operands[i] == AARCH64_OPND_Rt_LS64)
6548 {
6549 /* We've already checked if this is valid register.
6550 This will check if register number (Rt) is not undefined for
6551 LS64 instructions:
6552 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6553 if ((info->reg.regno & 0x18) == 0x18
6554 || (info->reg.regno & 0x01) == 0x01)
6555 {
6556 set_syntax_error
6557 (_("invalid Rt register number in 64-byte load/store"));
6558 goto failure;
6559 }
6560 }
6561 else if (operands[i] == AARCH64_OPND_X16)
6562 {
6563 if (info->reg.regno != 16)
6564 {
6565 goto failure;
6566 }
6567 }
6568 break;
6569
6570 case AARCH64_OPND_Rd_SP:
6571 case AARCH64_OPND_Rn_SP:
6572 case AARCH64_OPND_Rt_SP:
6573 case AARCH64_OPND_SVE_Rn_SP:
6574 case AARCH64_OPND_Rm_SP:
6575 po_int_fp_reg_or_fail (REG_TYPE_R_SP);
6576 break;
6577
6578 case AARCH64_OPND_Rm_EXT:
6579 case AARCH64_OPND_Rm_SFT:
6580 po_misc_or_fail (parse_shifter_operand
6581 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6582 ? SHIFTED_ARITH_IMM
6583 : SHIFTED_LOGIC_IMM)));
6584 if (!info->shifter.operator_present)
6585 {
6586 /* Default to LSL if not present. Libopcodes prefers shifter
6587 kind to be explicit. */
6588 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6589 info->shifter.kind = AARCH64_MOD_LSL;
6590 /* For Rm_EXT, libopcodes will carry out further check on whether
6591 or not stack pointer is used in the instruction (Recall that
6592 "the extend operator is not optional unless at least one of
6593 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6594 }
6595 break;
6596
6597 case AARCH64_OPND_Fd:
6598 case AARCH64_OPND_Fn:
6599 case AARCH64_OPND_Fm:
6600 case AARCH64_OPND_Fa:
6601 case AARCH64_OPND_Ft:
6602 case AARCH64_OPND_Ft2:
6603 case AARCH64_OPND_Sd:
6604 case AARCH64_OPND_Sn:
6605 case AARCH64_OPND_Sm:
6606 case AARCH64_OPND_SVE_VZn:
6607 case AARCH64_OPND_SVE_Vd:
6608 case AARCH64_OPND_SVE_Vm:
6609 case AARCH64_OPND_SVE_Vn:
6610 po_int_fp_reg_or_fail (REG_TYPE_BHSDQ);
6611 break;
6612
6613 case AARCH64_OPND_SVE_Pd:
6614 case AARCH64_OPND_SVE_Pg3:
6615 case AARCH64_OPND_SVE_Pg4_5:
6616 case AARCH64_OPND_SVE_Pg4_10:
6617 case AARCH64_OPND_SVE_Pg4_16:
6618 case AARCH64_OPND_SVE_Pm:
6619 case AARCH64_OPND_SVE_Pn:
6620 case AARCH64_OPND_SVE_Pt:
6621 case AARCH64_OPND_SME_Pm:
6622 reg_type = REG_TYPE_P;
6623 goto vector_reg;
6624
6625 case AARCH64_OPND_SVE_Za_5:
6626 case AARCH64_OPND_SVE_Za_16:
6627 case AARCH64_OPND_SVE_Zd:
6628 case AARCH64_OPND_SVE_Zm_5:
6629 case AARCH64_OPND_SVE_Zm_16:
6630 case AARCH64_OPND_SVE_Zn:
6631 case AARCH64_OPND_SVE_Zt:
6632 case AARCH64_OPND_SME_Zm:
6633 reg_type = REG_TYPE_Z;
6634 goto vector_reg;
6635
6636 case AARCH64_OPND_SVE_PNd:
6637 case AARCH64_OPND_SVE_PNg4_10:
6638 case AARCH64_OPND_SVE_PNn:
6639 case AARCH64_OPND_SVE_PNt:
6640 case AARCH64_OPND_SME_PNd3:
6641 case AARCH64_OPND_SME_PNg3:
6642 case AARCH64_OPND_SME_PNn:
6643 reg_type = REG_TYPE_PN;
6644 goto vector_reg;
6645
6646 case AARCH64_OPND_Va:
6647 case AARCH64_OPND_Vd:
6648 case AARCH64_OPND_Vn:
6649 case AARCH64_OPND_Vm:
6650 reg_type = REG_TYPE_V;
6651 vector_reg:
6652 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6653 if (!reg)
6654 goto failure;
6655 if (vectype.defined & NTA_HASINDEX)
6656 goto failure;
6657
6658 info->reg.regno = reg->number;
6659 if ((reg_type == REG_TYPE_P
6660 || reg_type == REG_TYPE_PN
6661 || reg_type == REG_TYPE_Z)
6662 && vectype.type == NT_invtype)
6663 /* Unqualified P and Z registers are allowed in certain
6664 contexts. Rely on F_STRICT qualifier checking to catch
6665 invalid uses. */
6666 info->qualifier = AARCH64_OPND_QLF_NIL;
6667 else
6668 {
6669 info->qualifier = vectype_to_qualifier (&vectype);
6670 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6671 goto failure;
6672 }
6673 break;
6674
6675 case AARCH64_OPND_VdD1:
6676 case AARCH64_OPND_VnD1:
6677 reg = aarch64_reg_parse (&str, REG_TYPE_V, &vectype);
6678 if (!reg)
6679 goto failure;
6680 if (vectype.type != NT_d || vectype.index != 1)
6681 {
6682 set_fatal_syntax_error
6683 (_("the top half of a 128-bit FP/SIMD register is expected"));
6684 goto failure;
6685 }
6686 info->reg.regno = reg->number;
6687 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6688 here; it is correct for the purpose of encoding/decoding since
6689 only the register number is explicitly encoded in the related
6690 instructions, although this appears a bit hacky. */
6691 info->qualifier = AARCH64_OPND_QLF_S_D;
6692 break;
6693
6694 case AARCH64_OPND_SVE_Zm3_INDEX:
6695 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6696 case AARCH64_OPND_SVE_Zm3_19_INDEX:
6697 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6698 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6699 case AARCH64_OPND_SVE_Zm4_INDEX:
6700 case AARCH64_OPND_SVE_Zn_INDEX:
6701 case AARCH64_OPND_SME_Zm_INDEX1:
6702 case AARCH64_OPND_SME_Zm_INDEX2:
6703 case AARCH64_OPND_SME_Zm_INDEX3_1:
6704 case AARCH64_OPND_SME_Zm_INDEX3_2:
6705 case AARCH64_OPND_SME_Zm_INDEX3_10:
6706 case AARCH64_OPND_SME_Zm_INDEX4_1:
6707 case AARCH64_OPND_SME_Zm_INDEX4_10:
6708 case AARCH64_OPND_SME_Zn_INDEX1_16:
6709 case AARCH64_OPND_SME_Zn_INDEX2_15:
6710 case AARCH64_OPND_SME_Zn_INDEX2_16:
6711 case AARCH64_OPND_SME_Zn_INDEX3_14:
6712 case AARCH64_OPND_SME_Zn_INDEX3_15:
6713 case AARCH64_OPND_SME_Zn_INDEX4_14:
6714 reg_type = REG_TYPE_Z;
6715 goto vector_reg_index;
6716
6717 case AARCH64_OPND_Ed:
6718 case AARCH64_OPND_En:
6719 case AARCH64_OPND_Em:
6720 case AARCH64_OPND_Em16:
6721 case AARCH64_OPND_SM3_IMM2:
6722 reg_type = REG_TYPE_V;
6723 vector_reg_index:
6724 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6725 if (!reg)
6726 goto failure;
6727 if (!(vectype.defined & NTA_HASINDEX))
6728 goto failure;
6729
6730 if (reg->type == REG_TYPE_Z && vectype.type == NT_invtype)
6731 /* Unqualified Zn[index] is allowed in LUTI2 instructions. */
6732 info->qualifier = AARCH64_OPND_QLF_NIL;
6733 else
6734 {
6735 if (vectype.type == NT_invtype)
6736 goto failure;
6737 info->qualifier = vectype_to_qualifier (&vectype);
6738 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6739 goto failure;
6740 }
6741
6742 info->reglane.regno = reg->number;
6743 info->reglane.index = vectype.index;
6744 break;
6745
6746 case AARCH64_OPND_SVE_ZnxN:
6747 case AARCH64_OPND_SVE_ZtxN:
6748 case AARCH64_OPND_SME_Zdnx2:
6749 case AARCH64_OPND_SME_Zdnx4:
6750 case AARCH64_OPND_SME_Zmx2:
6751 case AARCH64_OPND_SME_Zmx4:
6752 case AARCH64_OPND_SME_Znx2:
6753 case AARCH64_OPND_SME_Znx4:
6754 case AARCH64_OPND_SME_Ztx2_STRIDED:
6755 case AARCH64_OPND_SME_Ztx4_STRIDED:
6756 reg_type = REG_TYPE_Z;
6757 goto vector_reg_list;
6758
6759 case AARCH64_OPND_SME_Pdx2:
6760 case AARCH64_OPND_SME_PdxN:
6761 reg_type = REG_TYPE_P;
6762 goto vector_reg_list;
6763
6764 case AARCH64_OPND_LVn:
6765 case AARCH64_OPND_LVt:
6766 case AARCH64_OPND_LVt_AL:
6767 case AARCH64_OPND_LEt:
6768 reg_type = REG_TYPE_V;
6769 vector_reg_list:
6770 if (reg_type == REG_TYPE_Z
6771 && get_opcode_dependent_value (opcode) == 1
6772 && *str != '{')
6773 {
6774 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6775 if (!reg)
6776 goto failure;
6777 info->reglist.first_regno = reg->number;
6778 info->reglist.num_regs = 1;
6779 info->reglist.stride = 1;
6780 }
6781 else
6782 {
6783 val = parse_vector_reg_list (&str, reg_type, &vectype);
6784 if (val == PARSE_FAIL)
6785 goto failure;
6786
6787 if (! reg_list_valid_p (val, &info->reglist, reg_type))
6788 {
6789 set_fatal_syntax_error (_("invalid register list"));
6790 goto failure;
6791 }
6792
6793 if ((int) vectype.width > 0 && *str != ',')
6794 {
6795 set_fatal_syntax_error
6796 (_("expected element type rather than vector type"));
6797 goto failure;
6798 }
6799 }
6800 if (operands[i] == AARCH64_OPND_LEt)
6801 {
6802 if (!(vectype.defined & NTA_HASINDEX))
6803 goto failure;
6804 info->reglist.has_index = 1;
6805 info->reglist.index = vectype.index;
6806 }
6807 else
6808 {
6809 if (vectype.defined & NTA_HASINDEX)
6810 goto failure;
6811 if (!(vectype.defined & NTA_HASTYPE))
6812 {
6813 if (reg_type == REG_TYPE_Z || reg_type == REG_TYPE_P)
6814 set_fatal_syntax_error (_("missing type suffix"));
6815 goto failure;
6816 }
6817 }
6818 info->qualifier = vectype_to_qualifier (&vectype);
6819 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6820 goto failure;
6821 break;
6822
6823 case AARCH64_OPND_CRn:
6824 case AARCH64_OPND_CRm:
6825 {
6826 char prefix = *(str++);
6827 if (prefix != 'c' && prefix != 'C')
6828 goto failure;
6829
6830 po_imm_nc_or_fail ();
6831 if (flags & F_OPD_NARROW)
6832 {
6833 if ((operands[i] == AARCH64_OPND_CRn)
6834 && (val < 8 || val > 9))
6835 {
6836 set_fatal_syntax_error (_(N_ ("C8 - C9 expected")));
6837 goto failure;
6838 }
6839 else if ((operands[i] == AARCH64_OPND_CRm)
6840 && (val > 7))
6841 {
6842 set_fatal_syntax_error (_(N_ ("C0 - C7 expected")));
6843 goto failure;
6844 }
6845 }
6846 else if (val > 15)
6847 {
6848 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6849 goto failure;
6850 }
6851 info->qualifier = AARCH64_OPND_QLF_CR;
6852 info->imm.value = val;
6853 break;
6854 }
6855
6856 case AARCH64_OPND_SHLL_IMM:
6857 case AARCH64_OPND_IMM_VLSR:
6858 po_imm_or_fail (1, 64);
6859 info->imm.value = val;
6860 break;
6861
6862 case AARCH64_OPND_CCMP_IMM:
6863 case AARCH64_OPND_SIMM5:
6864 case AARCH64_OPND_FBITS:
6865 case AARCH64_OPND_TME_UIMM16:
6866 case AARCH64_OPND_UIMM4:
6867 case AARCH64_OPND_UIMM4_ADDG:
6868 case AARCH64_OPND_UIMM10:
6869 case AARCH64_OPND_UIMM3_OP1:
6870 case AARCH64_OPND_UIMM3_OP2:
6871 case AARCH64_OPND_IMM_VLSL:
6872 case AARCH64_OPND_IMM:
6873 case AARCH64_OPND_IMM_2:
6874 case AARCH64_OPND_WIDTH:
6875 case AARCH64_OPND_SVE_INV_LIMM:
6876 case AARCH64_OPND_SVE_LIMM:
6877 case AARCH64_OPND_SVE_LIMM_MOV:
6878 case AARCH64_OPND_SVE_SHLIMM_PRED:
6879 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6880 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6881 case AARCH64_OPND_SME_SHRIMM4:
6882 case AARCH64_OPND_SME_SHRIMM5:
6883 case AARCH64_OPND_SVE_SHRIMM_PRED:
6884 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6885 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6886 case AARCH64_OPND_SVE_SIMM5:
6887 case AARCH64_OPND_SVE_SIMM5B:
6888 case AARCH64_OPND_SVE_SIMM6:
6889 case AARCH64_OPND_SVE_SIMM8:
6890 case AARCH64_OPND_SVE_UIMM3:
6891 case AARCH64_OPND_SVE_UIMM7:
6892 case AARCH64_OPND_SVE_UIMM8:
6893 case AARCH64_OPND_SVE_UIMM8_53:
6894 case AARCH64_OPND_IMM_ROT1:
6895 case AARCH64_OPND_IMM_ROT2:
6896 case AARCH64_OPND_IMM_ROT3:
6897 case AARCH64_OPND_SVE_IMM_ROT1:
6898 case AARCH64_OPND_SVE_IMM_ROT2:
6899 case AARCH64_OPND_SVE_IMM_ROT3:
6900 case AARCH64_OPND_CSSC_SIMM8:
6901 case AARCH64_OPND_CSSC_UIMM8:
6902 po_imm_nc_or_fail ();
6903 info->imm.value = val;
6904 break;
6905
6906 case AARCH64_OPND_SVE_AIMM:
6907 case AARCH64_OPND_SVE_ASIMM:
6908 po_imm_nc_or_fail ();
6909 info->imm.value = val;
6910 skip_whitespace (str);
6911 if (skip_past_comma (&str))
6912 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6913 else
6914 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6915 break;
6916
6917 case AARCH64_OPND_SVE_PATTERN:
6918 po_enum_or_fail (aarch64_sve_pattern_array);
6919 info->imm.value = val;
6920 break;
6921
6922 case AARCH64_OPND_SVE_PATTERN_SCALED:
6923 po_enum_or_fail (aarch64_sve_pattern_array);
6924 info->imm.value = val;
6925 if (skip_past_comma (&str)
6926 && !parse_shift (&str, info, SHIFTED_MUL))
6927 goto failure;
6928 if (!info->shifter.operator_present)
6929 {
6930 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6931 info->shifter.kind = AARCH64_MOD_MUL;
6932 info->shifter.amount = 1;
6933 }
6934 break;
6935
6936 case AARCH64_OPND_SVE_PRFOP:
6937 po_enum_or_fail (aarch64_sve_prfop_array);
6938 info->imm.value = val;
6939 break;
6940
6941 case AARCH64_OPND_UIMM7:
6942 po_imm_or_fail (0, 127);
6943 info->imm.value = val;
6944 break;
6945
6946 case AARCH64_OPND_IDX:
6947 case AARCH64_OPND_MASK:
6948 case AARCH64_OPND_BIT_NUM:
6949 case AARCH64_OPND_IMMR:
6950 case AARCH64_OPND_IMMS:
6951 po_imm_or_fail (0, 63);
6952 info->imm.value = val;
6953 break;
6954
6955 case AARCH64_OPND_IMM0:
6956 po_imm_nc_or_fail ();
6957 if (val != 0)
6958 {
6959 set_fatal_syntax_error (_("immediate zero expected"));
6960 goto failure;
6961 }
6962 info->imm.value = 0;
6963 break;
6964
6965 case AARCH64_OPND_FPIMM0:
6966 {
6967 int qfloat;
6968 bool res1 = false, res2 = false;
6969 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6970 it is probably not worth the effort to support it. */
6971 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6972 imm_reg_type))
6973 && (error_p ()
6974 || !(res2 = parse_constant_immediate (&str, &val,
6975 imm_reg_type))))
6976 goto failure;
6977 if ((res1 && qfloat == 0) || (res2 && val == 0))
6978 {
6979 info->imm.value = 0;
6980 info->imm.is_fp = 1;
6981 break;
6982 }
6983 set_fatal_syntax_error (_("immediate zero expected"));
6984 goto failure;
6985 }
6986
6987 case AARCH64_OPND_IMM_MOV:
6988 {
6989 char *saved = str;
6990 if (reg_name_p (str, REG_TYPE_R_ZR_SP)
6991 || reg_name_p (str, REG_TYPE_V))
6992 goto failure;
6993 str = saved;
6994 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6995 GE_OPT_PREFIX, REJECT_ABSENT));
6996 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6997 later. fix_mov_imm_insn will try to determine a machine
6998 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6999 message if the immediate cannot be moved by a single
7000 instruction. */
7001 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7002 inst.base.operands[i].skip = 1;
7003 }
7004 break;
7005
7006 case AARCH64_OPND_SIMD_IMM:
7007 case AARCH64_OPND_SIMD_IMM_SFT:
7008 if (! parse_big_immediate (&str, &val, imm_reg_type))
7009 goto failure;
7010 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7011 /* addr_off_p */ 0,
7012 /* need_libopcodes_p */ 1,
7013 /* skip_p */ 1);
7014 /* Parse shift.
7015 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
7016 shift, we don't check it here; we leave the checking to
7017 the libopcodes (operand_general_constraint_met_p). By
7018 doing this, we achieve better diagnostics. */
7019 if (skip_past_comma (&str)
7020 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
7021 goto failure;
7022 if (!info->shifter.operator_present
7023 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
7024 {
7025 /* Default to LSL if not present. Libopcodes prefers shifter
7026 kind to be explicit. */
7027 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7028 info->shifter.kind = AARCH64_MOD_LSL;
7029 }
7030 break;
7031
7032 case AARCH64_OPND_FPIMM:
7033 case AARCH64_OPND_SIMD_FPIMM:
7034 case AARCH64_OPND_SVE_FPIMM8:
7035 {
7036 int qfloat;
7037 bool dp_p;
7038
7039 dp_p = double_precision_operand_p (&inst.base.operands[0]);
7040 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
7041 || !aarch64_imm_float_p (qfloat))
7042 {
7043 if (!error_p ())
7044 set_fatal_syntax_error (_("invalid floating-point"
7045 " constant"));
7046 goto failure;
7047 }
7048 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
7049 inst.base.operands[i].imm.is_fp = 1;
7050 }
7051 break;
7052
7053 case AARCH64_OPND_SVE_I1_HALF_ONE:
7054 case AARCH64_OPND_SVE_I1_HALF_TWO:
7055 case AARCH64_OPND_SVE_I1_ZERO_ONE:
7056 {
7057 int qfloat;
7058 bool dp_p;
7059
7060 dp_p = double_precision_operand_p (&inst.base.operands[0]);
7061 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
7062 {
7063 if (!error_p ())
7064 set_fatal_syntax_error (_("invalid floating-point"
7065 " constant"));
7066 goto failure;
7067 }
7068 inst.base.operands[i].imm.value = qfloat;
7069 inst.base.operands[i].imm.is_fp = 1;
7070 }
7071 break;
7072
7073 case AARCH64_OPND_LIMM:
7074 po_misc_or_fail (parse_shifter_operand (&str, info,
7075 SHIFTED_LOGIC_IMM));
7076 if (info->shifter.operator_present)
7077 {
7078 set_fatal_syntax_error
7079 (_("shift not allowed for bitmask immediate"));
7080 goto failure;
7081 }
7082 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7083 /* addr_off_p */ 0,
7084 /* need_libopcodes_p */ 1,
7085 /* skip_p */ 1);
7086 break;
7087
7088 case AARCH64_OPND_AIMM:
7089 if (opcode->op == OP_ADD)
7090 /* ADD may have relocation types. */
7091 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
7092 SHIFTED_ARITH_IMM));
7093 else
7094 po_misc_or_fail (parse_shifter_operand (&str, info,
7095 SHIFTED_ARITH_IMM));
7096 switch (inst.reloc.type)
7097 {
7098 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7099 info->shifter.amount = 12;
7100 break;
7101 case BFD_RELOC_UNUSED:
7102 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
7103 if (info->shifter.kind != AARCH64_MOD_NONE)
7104 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
7105 inst.reloc.pc_rel = 0;
7106 break;
7107 default:
7108 break;
7109 }
7110 info->imm.value = 0;
7111 if (!info->shifter.operator_present)
7112 {
7113 /* Default to LSL if not present. Libopcodes prefers shifter
7114 kind to be explicit. */
7115 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7116 info->shifter.kind = AARCH64_MOD_LSL;
7117 }
7118 break;
7119
7120 case AARCH64_OPND_HALF:
7121 {
7122 /* #<imm16> or relocation. */
7123 int internal_fixup_p;
7124 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
7125 if (internal_fixup_p)
7126 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
7127 skip_whitespace (str);
7128 if (skip_past_comma (&str))
7129 {
7130 /* {, LSL #<shift>} */
7131 if (! aarch64_gas_internal_fixup_p ())
7132 {
7133 set_fatal_syntax_error (_("can't mix relocation modifier "
7134 "with explicit shift"));
7135 goto failure;
7136 }
7137 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
7138 }
7139 else
7140 inst.base.operands[i].shifter.amount = 0;
7141 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
7142 inst.base.operands[i].imm.value = 0;
7143 if (! process_movw_reloc_info ())
7144 goto failure;
7145 }
7146 break;
7147
7148 case AARCH64_OPND_EXCEPTION:
7149 case AARCH64_OPND_UNDEFINED:
7150 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
7151 imm_reg_type));
7152 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7153 /* addr_off_p */ 0,
7154 /* need_libopcodes_p */ 0,
7155 /* skip_p */ 1);
7156 break;
7157
7158 case AARCH64_OPND_NZCV:
7159 {
7160 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
7161 if (nzcv != NULL)
7162 {
7163 str += 4;
7164 info->imm.value = nzcv->value;
7165 break;
7166 }
7167 po_imm_or_fail (0, 15);
7168 info->imm.value = val;
7169 }
7170 break;
7171
7172 case AARCH64_OPND_COND:
7173 case AARCH64_OPND_COND1:
7174 {
7175 char *start = str;
7176 do
7177 str++;
7178 while (ISALPHA (*str));
7179 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
7180 if (info->cond == NULL)
7181 {
7182 set_syntax_error (_("invalid condition"));
7183 goto failure;
7184 }
7185 else if (operands[i] == AARCH64_OPND_COND1
7186 && (info->cond->value & 0xe) == 0xe)
7187 {
7188 /* Do not allow AL or NV. */
7189 set_default_error ();
7190 goto failure;
7191 }
7192 }
7193 break;
7194
7195 case AARCH64_OPND_ADDR_ADRP:
7196 po_misc_or_fail (parse_adrp (&str));
7197 /* Clear the value as operand needs to be relocated. */
7198 info->imm.value = 0;
7199 break;
7200
7201 case AARCH64_OPND_ADDR_PCREL14:
7202 case AARCH64_OPND_ADDR_PCREL19:
7203 case AARCH64_OPND_ADDR_PCREL21:
7204 case AARCH64_OPND_ADDR_PCREL26:
7205 po_misc_or_fail (parse_address (&str, info));
7206 if (!info->addr.pcrel)
7207 {
7208 set_syntax_error (_("invalid pc-relative address"));
7209 goto failure;
7210 }
7211 if (inst.gen_lit_pool
7212 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
7213 {
7214 /* Only permit "=value" in the literal load instructions.
7215 The literal will be generated by programmer_friendly_fixup. */
7216 set_syntax_error (_("invalid use of \"=immediate\""));
7217 goto failure;
7218 }
7219 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
7220 {
7221 set_syntax_error (_("unrecognized relocation suffix"));
7222 goto failure;
7223 }
7224 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
7225 {
7226 info->imm.value = inst.reloc.exp.X_add_number;
7227 inst.reloc.type = BFD_RELOC_UNUSED;
7228 }
7229 else
7230 {
7231 info->imm.value = 0;
7232 if (inst.reloc.type == BFD_RELOC_UNUSED)
7233 switch (opcode->iclass)
7234 {
7235 case compbranch:
7236 case condbranch:
7237 /* e.g. CBZ or B.COND */
7238 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7239 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
7240 break;
7241 case testbranch:
7242 /* e.g. TBZ */
7243 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
7244 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
7245 break;
7246 case branch_imm:
7247 /* e.g. B or BL */
7248 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
7249 inst.reloc.type =
7250 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
7251 : BFD_RELOC_AARCH64_JUMP26;
7252 break;
7253 case loadlit:
7254 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7255 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
7256 break;
7257 case pcreladdr:
7258 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
7259 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
7260 break;
7261 default:
7262 gas_assert (0);
7263 abort ();
7264 }
7265 inst.reloc.pc_rel = 1;
7266 }
7267 break;
7268
7269 case AARCH64_OPND_ADDR_SIMPLE:
7270 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7271 {
7272 /* [<Xn|SP>{, #<simm>}] */
7273 char *start = str;
7274 /* First use the normal address-parsing routines, to get
7275 the usual syntax errors. */
7276 po_misc_or_fail (parse_address (&str, info));
7277 if (info->addr.pcrel || info->addr.offset.is_reg
7278 || !info->addr.preind || info->addr.postind
7279 || info->addr.writeback)
7280 {
7281 set_syntax_error (_("invalid addressing mode"));
7282 goto failure;
7283 }
7284
7285 /* Then retry, matching the specific syntax of these addresses. */
7286 str = start;
7287 po_char_or_fail ('[');
7288 po_reg_or_fail (REG_TYPE_R64_SP);
7289 /* Accept optional ", #0". */
7290 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7291 && skip_past_char (&str, ','))
7292 {
7293 skip_past_char (&str, '#');
7294 if (! skip_past_char (&str, '0'))
7295 {
7296 set_fatal_syntax_error
7297 (_("the optional immediate offset can only be 0"));
7298 goto failure;
7299 }
7300 }
7301 po_char_or_fail (']');
7302 break;
7303 }
7304
7305 case AARCH64_OPND_ADDR_REGOFF:
7306 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7307 po_misc_or_fail (parse_address (&str, info));
7308 regoff_addr:
7309 if (info->addr.pcrel || !info->addr.offset.is_reg
7310 || !info->addr.preind || info->addr.postind
7311 || info->addr.writeback)
7312 {
7313 set_syntax_error (_("invalid addressing mode"));
7314 goto failure;
7315 }
7316 if (!info->shifter.operator_present)
7317 {
7318 /* Default to LSL if not present. Libopcodes prefers shifter
7319 kind to be explicit. */
7320 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7321 info->shifter.kind = AARCH64_MOD_LSL;
7322 }
7323 /* Qualifier to be deduced by libopcodes. */
7324 break;
7325
7326 case AARCH64_OPND_ADDR_SIMM7:
7327 po_misc_or_fail (parse_address (&str, info));
7328 if (info->addr.pcrel || info->addr.offset.is_reg
7329 || (!info->addr.preind && !info->addr.postind))
7330 {
7331 set_syntax_error (_("invalid addressing mode"));
7332 goto failure;
7333 }
7334 if (inst.reloc.type != BFD_RELOC_UNUSED)
7335 {
7336 set_syntax_error (_("relocation not allowed"));
7337 goto failure;
7338 }
7339 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7340 /* addr_off_p */ 1,
7341 /* need_libopcodes_p */ 1,
7342 /* skip_p */ 0);
7343 break;
7344
7345 case AARCH64_OPND_ADDR_SIMM9:
7346 case AARCH64_OPND_ADDR_SIMM9_2:
7347 case AARCH64_OPND_ADDR_SIMM11:
7348 case AARCH64_OPND_ADDR_SIMM13:
7349 po_misc_or_fail (parse_address (&str, info));
7350 if (info->addr.pcrel || info->addr.offset.is_reg
7351 || (!info->addr.preind && !info->addr.postind)
7352 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7353 && info->addr.writeback))
7354 {
7355 set_syntax_error (_("invalid addressing mode"));
7356 goto failure;
7357 }
7358 if (inst.reloc.type != BFD_RELOC_UNUSED)
7359 {
7360 set_syntax_error (_("relocation not allowed"));
7361 goto failure;
7362 }
7363 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7364 /* addr_off_p */ 1,
7365 /* need_libopcodes_p */ 1,
7366 /* skip_p */ 0);
7367 break;
7368
7369 case AARCH64_OPND_ADDR_SIMM10:
7370 case AARCH64_OPND_ADDR_OFFSET:
7371 po_misc_or_fail (parse_address (&str, info));
7372 if (info->addr.pcrel || info->addr.offset.is_reg
7373 || !info->addr.preind || info->addr.postind)
7374 {
7375 set_syntax_error (_("invalid addressing mode"));
7376 goto failure;
7377 }
7378 if (inst.reloc.type != BFD_RELOC_UNUSED)
7379 {
7380 set_syntax_error (_("relocation not allowed"));
7381 goto failure;
7382 }
7383 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7384 /* addr_off_p */ 1,
7385 /* need_libopcodes_p */ 1,
7386 /* skip_p */ 0);
7387 break;
7388
7389 case AARCH64_OPND_ADDR_UIMM12:
7390 po_misc_or_fail (parse_address (&str, info));
7391 if (info->addr.pcrel || info->addr.offset.is_reg
7392 || !info->addr.preind || info->addr.writeback)
7393 {
7394 set_syntax_error (_("invalid addressing mode"));
7395 goto failure;
7396 }
7397 if (inst.reloc.type == BFD_RELOC_UNUSED)
7398 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7399 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7400 || (inst.reloc.type
7401 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7402 || (inst.reloc.type
7403 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7404 || (inst.reloc.type
7405 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7406 || (inst.reloc.type
7407 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7408 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7409 /* Leave qualifier to be determined by libopcodes. */
7410 break;
7411
7412 case AARCH64_OPND_SIMD_ADDR_POST:
7413 /* [<Xn|SP>], <Xm|#<amount>> */
7414 po_misc_or_fail (parse_address (&str, info));
7415 if (!info->addr.postind || !info->addr.writeback)
7416 {
7417 set_syntax_error (_("invalid addressing mode"));
7418 goto failure;
7419 }
7420 if (!info->addr.offset.is_reg)
7421 {
7422 if (inst.reloc.exp.X_op == O_constant)
7423 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7424 else
7425 {
7426 set_fatal_syntax_error
7427 (_("writeback value must be an immediate constant"));
7428 goto failure;
7429 }
7430 }
7431 /* No qualifier. */
7432 break;
7433
7434 case AARCH64_OPND_SME_SM_ZA:
7435 /* { SM | ZA } */
7436 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7437 {
7438 set_syntax_error (_("unknown or missing PSTATE field name"));
7439 goto failure;
7440 }
7441 info->reg.regno = val;
7442 break;
7443
7444 case AARCH64_OPND_SME_PnT_Wm_imm:
7445 if (!parse_dual_indexed_reg (&str, REG_TYPE_P,
7446 &info->indexed_za, &qualifier, 0))
7447 goto failure;
7448 info->qualifier = qualifier;
7449 break;
7450
7451 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7452 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7453 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7454 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7455 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7456 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7457 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7458 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7459 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7460 case AARCH64_OPND_SVE_ADDR_RI_U6:
7461 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7462 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7463 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7464 /* [X<n>{, #imm, MUL VL}]
7465 [X<n>{, #imm}]
7466 but recognizing SVE registers. */
7467 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7468 &offset_qualifier));
7469 if (base_qualifier != AARCH64_OPND_QLF_X)
7470 {
7471 set_syntax_error (_("invalid addressing mode"));
7472 goto failure;
7473 }
7474 sve_regimm:
7475 if (info->addr.pcrel || info->addr.offset.is_reg
7476 || !info->addr.preind || info->addr.writeback)
7477 {
7478 set_syntax_error (_("invalid addressing mode"));
7479 goto failure;
7480 }
7481 if (inst.reloc.type != BFD_RELOC_UNUSED
7482 || inst.reloc.exp.X_op != O_constant)
7483 {
7484 /* Make sure this has priority over
7485 "invalid addressing mode". */
7486 set_fatal_syntax_error (_("constant offset required"));
7487 goto failure;
7488 }
7489 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7490 break;
7491
7492 case AARCH64_OPND_SVE_ADDR_R:
7493 /* [<Xn|SP>{, <R><m>}]
7494 but recognizing SVE registers. */
7495 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7496 &offset_qualifier));
7497 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7498 {
7499 offset_qualifier = AARCH64_OPND_QLF_X;
7500 info->addr.offset.is_reg = 1;
7501 info->addr.offset.regno = 31;
7502 }
7503 else if (base_qualifier != AARCH64_OPND_QLF_X
7504 || offset_qualifier != AARCH64_OPND_QLF_X)
7505 {
7506 set_syntax_error (_("invalid addressing mode"));
7507 goto failure;
7508 }
7509 goto regoff_addr;
7510
7511 case AARCH64_OPND_SVE_ADDR_RR:
7512 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7513 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7514 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7515 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7516 case AARCH64_OPND_SVE_ADDR_RX:
7517 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7518 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7519 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7520 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7521 but recognizing SVE registers. */
7522 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7523 &offset_qualifier));
7524 if (base_qualifier != AARCH64_OPND_QLF_X
7525 || offset_qualifier != AARCH64_OPND_QLF_X)
7526 {
7527 set_syntax_error (_("invalid addressing mode"));
7528 goto failure;
7529 }
7530 goto regoff_addr;
7531
7532 case AARCH64_OPND_SVE_ADDR_RZ:
7533 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7534 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7535 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7536 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7537 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7538 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7539 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7540 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7541 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7542 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7543 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7544 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7545 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7546 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7547 &offset_qualifier));
7548 if (base_qualifier != AARCH64_OPND_QLF_X
7549 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7550 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7551 {
7552 set_syntax_error (_("invalid addressing mode"));
7553 goto failure;
7554 }
7555 info->qualifier = offset_qualifier;
7556 goto regoff_addr;
7557
7558 case AARCH64_OPND_SVE_ADDR_ZX:
7559 /* [Zn.<T>{, <Xm>}]. */
7560 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7561 &offset_qualifier));
7562 /* Things to check:
7563 base_qualifier either S_S or S_D
7564 offset_qualifier must be X
7565 */
7566 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7567 && base_qualifier != AARCH64_OPND_QLF_S_D)
7568 || offset_qualifier != AARCH64_OPND_QLF_X)
7569 {
7570 set_syntax_error (_("invalid addressing mode"));
7571 goto failure;
7572 }
7573 info->qualifier = base_qualifier;
7574 if (!info->addr.offset.is_reg || info->addr.pcrel
7575 || !info->addr.preind || info->addr.writeback
7576 || info->shifter.operator_present != 0)
7577 {
7578 set_syntax_error (_("invalid addressing mode"));
7579 goto failure;
7580 }
7581 info->shifter.kind = AARCH64_MOD_LSL;
7582 break;
7583
7584
7585 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7586 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7587 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7588 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7589 /* [Z<n>.<T>{, #imm}] */
7590 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7591 &offset_qualifier));
7592 if (base_qualifier != AARCH64_OPND_QLF_S_S
7593 && base_qualifier != AARCH64_OPND_QLF_S_D)
7594 {
7595 set_syntax_error (_("invalid addressing mode"));
7596 goto failure;
7597 }
7598 info->qualifier = base_qualifier;
7599 goto sve_regimm;
7600
7601 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7602 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7603 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7604 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7605 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7606
7607 We don't reject:
7608
7609 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7610
7611 here since we get better error messages by leaving it to
7612 the qualifier checking routines. */
7613 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7614 &offset_qualifier));
7615 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7616 && base_qualifier != AARCH64_OPND_QLF_S_D)
7617 || offset_qualifier != base_qualifier)
7618 {
7619 set_syntax_error (_("invalid addressing mode"));
7620 goto failure;
7621 }
7622 info->qualifier = base_qualifier;
7623 goto regoff_addr;
7624 case AARCH64_OPND_SYSREG:
7625 case AARCH64_OPND_SYSREG128:
7626 {
7627 bool sysreg128_p = operands[i] == AARCH64_OPND_SYSREG128;
7628 uint32_t sysreg_flags;
7629 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7630 &sysreg_flags,
7631 sysreg128_p)) == PARSE_FAIL)
7632 {
7633 set_syntax_error (_("unknown or missing system register name"));
7634 goto failure;
7635 }
7636 inst.base.operands[i].sysreg.value = val;
7637 inst.base.operands[i].sysreg.flags = sysreg_flags;
7638 break;
7639 }
7640
7641 case AARCH64_OPND_PSTATEFIELD:
7642 {
7643 uint32_t sysreg_flags;
7644 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7645 &sysreg_flags, false)) == PARSE_FAIL)
7646 {
7647 set_syntax_error (_("unknown or missing PSTATE field name"));
7648 goto failure;
7649 }
7650 inst.base.operands[i].pstatefield = val;
7651 inst.base.operands[i].sysreg.flags = sysreg_flags;
7652 break;
7653 }
7654
7655 case AARCH64_OPND_SYSREG_IC:
7656 inst.base.operands[i].sysins_op =
7657 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7658 goto sys_reg_ins;
7659
7660 case AARCH64_OPND_SYSREG_DC:
7661 inst.base.operands[i].sysins_op =
7662 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7663 goto sys_reg_ins;
7664
7665 case AARCH64_OPND_SYSREG_AT:
7666 inst.base.operands[i].sysins_op =
7667 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7668 goto sys_reg_ins;
7669
7670 case AARCH64_OPND_SYSREG_SR:
7671 inst.base.operands[i].sysins_op =
7672 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7673 goto sys_reg_ins;
7674
7675 case AARCH64_OPND_SYSREG_TLBI:
7676 case AARCH64_OPND_SYSREG_TLBIP:
7677 inst.base.operands[i].sysins_op =
7678 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7679 sys_reg_ins:
7680 if (inst.base.operands[i].sysins_op == NULL)
7681 {
7682 set_fatal_syntax_error ( _("unknown or missing operation name"));
7683 goto failure;
7684 }
7685 break;
7686
7687 case AARCH64_OPND_BARRIER:
7688 case AARCH64_OPND_BARRIER_ISB:
7689 val = parse_barrier (&str);
7690 if (val != PARSE_FAIL
7691 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7692 {
7693 /* ISB only accepts options name 'sy'. */
7694 set_syntax_error
7695 (_("the specified option is not accepted in ISB"));
7696 /* Turn off backtrack as this optional operand is present. */
7697 backtrack_pos = 0;
7698 goto failure;
7699 }
7700 if (val != PARSE_FAIL
7701 && operands[i] == AARCH64_OPND_BARRIER)
7702 {
7703 /* Regular barriers accept options CRm (C0-C15).
7704 DSB nXS barrier variant accepts values > 15. */
7705 if (val < 0 || val > 15)
7706 {
7707 set_syntax_error (_("the specified option is not accepted in DSB"));
7708 goto failure;
7709 }
7710 }
7711 /* This is an extension to accept a 0..15 immediate. */
7712 if (val == PARSE_FAIL)
7713 po_imm_or_fail (0, 15);
7714 info->barrier = aarch64_barrier_options + val;
7715 break;
7716
7717 case AARCH64_OPND_BARRIER_DSB_NXS:
7718 val = parse_barrier (&str);
7719 if (val != PARSE_FAIL)
7720 {
7721 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7722 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7723 {
7724 set_syntax_error (_("the specified option is not accepted in DSB"));
7725 /* Turn off backtrack as this optional operand is present. */
7726 backtrack_pos = 0;
7727 goto failure;
7728 }
7729 }
7730 else
7731 {
7732 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7733 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7734 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7735 goto failure;
7736 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7737 {
7738 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7739 goto failure;
7740 }
7741 }
7742 /* Option index is encoded as 2-bit value in val<3:2>. */
7743 val = (val >> 2) - 4;
7744 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7745 break;
7746
7747 case AARCH64_OPND_PRFOP:
7748 val = parse_pldop (&str);
7749 /* This is an extension to accept a 0..31 immediate. */
7750 if (val == PARSE_FAIL)
7751 po_imm_or_fail (0, 31);
7752 inst.base.operands[i].prfop = aarch64_prfops + val;
7753 break;
7754
7755 case AARCH64_OPND_RPRFMOP:
7756 po_enum_or_fail (aarch64_rprfmop_array);
7757 info->imm.value = val;
7758 break;
7759
7760 case AARCH64_OPND_BARRIER_PSB:
7761 if (!parse_hint_opt (opcode->name, &str, &(info->hint_option)))
7762 goto failure;
7763 break;
7764
7765 case AARCH64_OPND_SME_ZT0:
7766 po_reg_or_fail (REG_TYPE_ZT0);
7767 break;
7768
7769 case AARCH64_OPND_SME_ZT0_INDEX:
7770 reg = aarch64_reg_parse (&str, REG_TYPE_ZT0, &vectype);
7771 if (!reg || vectype.type != NT_invtype)
7772 goto failure;
7773 if (!(vectype.defined & NTA_HASINDEX))
7774 {
7775 set_syntax_error (_("missing register index"));
7776 goto failure;
7777 }
7778 info->imm.value = vectype.index;
7779 break;
7780
7781 case AARCH64_OPND_SME_ZT0_LIST:
7782 if (*str != '{')
7783 {
7784 set_expected_reglist_error (REG_TYPE_ZT0, parse_reg (&str));
7785 goto failure;
7786 }
7787 str++;
7788 if (!parse_typed_reg (&str, REG_TYPE_ZT0, &vectype, PTR_IN_REGLIST))
7789 goto failure;
7790 if (*str != '}')
7791 {
7792 set_syntax_error (_("expected '}' after ZT0"));
7793 goto failure;
7794 }
7795 str++;
7796 break;
7797
7798 case AARCH64_OPND_SME_PNn3_INDEX1:
7799 case AARCH64_OPND_SME_PNn3_INDEX2:
7800 reg = aarch64_reg_parse (&str, REG_TYPE_PN, &vectype);
7801 if (!reg)
7802 goto failure;
7803 if (!(vectype.defined & NTA_HASINDEX))
7804 {
7805 set_syntax_error (_("missing register index"));
7806 goto failure;
7807 }
7808 info->reglane.regno = reg->number;
7809 info->reglane.index = vectype.index;
7810 if (vectype.type == NT_invtype)
7811 info->qualifier = AARCH64_OPND_QLF_NIL;
7812 else
7813 info->qualifier = vectype_to_qualifier (&vectype);
7814 break;
7815
7816 case AARCH64_OPND_BARRIER_GCSB:
7817 if (!parse_hint_opt (opcode->name, &str, &(info->hint_option)))
7818 goto failure;
7819 break;
7820
7821 case AARCH64_OPND_BTI_TARGET:
7822 if (!parse_hint_opt (opcode->name, &str, &(info->hint_option)))
7823 goto failure;
7824 break;
7825
7826 case AARCH64_OPND_SME_ZAda_2b:
7827 case AARCH64_OPND_SME_ZAda_3b:
7828 reg = parse_reg_with_qual (&str, REG_TYPE_ZAT, &qualifier, 0);
7829 if (!reg)
7830 goto failure;
7831 info->reg.regno = reg->number;
7832 info->qualifier = qualifier;
7833 break;
7834
7835 case AARCH64_OPND_SME_ZA_HV_idx_src:
7836 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
7837 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7838 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
7839 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7840 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr
7841 ? !parse_sme_za_hv_tiles_operand_with_braces (&str,
7842 &info->indexed_za,
7843 &qualifier)
7844 : !parse_dual_indexed_reg (&str, REG_TYPE_ZATHV,
7845 &info->indexed_za, &qualifier, 0))
7846 goto failure;
7847 info->qualifier = qualifier;
7848 break;
7849
7850 case AARCH64_OPND_SME_list_of_64bit_tiles:
7851 val = parse_sme_list_of_64bit_tiles (&str);
7852 if (val == PARSE_FAIL)
7853 goto failure;
7854 info->imm.value = val;
7855 break;
7856
7857 case AARCH64_OPND_SME_ZA_array_off1x4:
7858 case AARCH64_OPND_SME_ZA_array_off2x2:
7859 case AARCH64_OPND_SME_ZA_array_off2x4:
7860 case AARCH64_OPND_SME_ZA_array_off3_0:
7861 case AARCH64_OPND_SME_ZA_array_off3_5:
7862 case AARCH64_OPND_SME_ZA_array_off3x2:
7863 case AARCH64_OPND_SME_ZA_array_off4:
7864 if (!parse_dual_indexed_reg (&str, REG_TYPE_ZA,
7865 &info->indexed_za, &qualifier, 0))
7866 goto failure;
7867 info->qualifier = qualifier;
7868 break;
7869
7870 case AARCH64_OPND_SME_ZA_array_vrsb_1:
7871 case AARCH64_OPND_SME_ZA_array_vrsh_1:
7872 case AARCH64_OPND_SME_ZA_array_vrss_1:
7873 case AARCH64_OPND_SME_ZA_array_vrsd_1:
7874 case AARCH64_OPND_SME_ZA_array_vrsb_2:
7875 case AARCH64_OPND_SME_ZA_array_vrsh_2:
7876 case AARCH64_OPND_SME_ZA_array_vrss_2:
7877 case AARCH64_OPND_SME_ZA_array_vrsd_2:
7878 if (!parse_dual_indexed_reg (&str, REG_TYPE_ZATHV,
7879 &info->indexed_za, &qualifier, 0))
7880 goto failure;
7881 info->qualifier = qualifier;
7882 break;
7883
7884
7885 case AARCH64_OPND_SME_VLxN_10:
7886 case AARCH64_OPND_SME_VLxN_13:
7887 po_strict_enum_or_fail (aarch64_sme_vlxn_array);
7888 info->imm.value = val;
7889 break;
7890
7891 case AARCH64_OPND_MOPS_ADDR_Rd:
7892 case AARCH64_OPND_MOPS_ADDR_Rs:
7893 po_char_or_fail ('[');
7894 if (!parse_x0_to_x30 (&str, info))
7895 goto failure;
7896 po_char_or_fail (']');
7897 po_char_or_fail ('!');
7898 break;
7899
7900 case AARCH64_OPND_MOPS_WB_Rn:
7901 if (!parse_x0_to_x30 (&str, info))
7902 goto failure;
7903 po_char_or_fail ('!');
7904 break;
7905
7906 case AARCH64_OPND_LSE128_Rt:
7907 case AARCH64_OPND_LSE128_Rt2:
7908 po_int_fp_reg_or_fail (REG_TYPE_R_64);
7909 break;
7910
7911 default:
7912 as_fatal (_("unhandled operand code %d"), operands[i]);
7913 }
7914
7915 /* If we get here, this operand was successfully parsed. */
7916 inst.base.operands[i].present = 1;
7917
/* As instructions can have multiple optional operands, it is important to
   reset the backtrack_pos variable once we finish processing an operand
   successfully. */
7921 backtrack_pos = 0;
7922
7923 continue;
7924
7925 failure:
7926 /* The parse routine should already have set the error, but in case
7927 not, set a default one here. */
7928 if (! error_p ())
7929 set_default_error ();
7930
7931 if (! backtrack_pos)
7932 goto parse_operands_return;
7933
7934 {
7935 /* We reach here because this operand is marked as optional, and
7936 either no operand was supplied or the operand was supplied but it
7937 was syntactically incorrect. In the latter case we report an
7938 error. In the former case we perform a few more checks before
7939 dropping through to the code to insert the default operand. */
7940
7941 char *tmp = backtrack_pos;
7942 char endchar = END_OF_INSN;
7943
7944 skip_past_char (&tmp, ',');
7945
7946 if (*tmp != endchar)
7947 /* The user has supplied an operand in the wrong format. */
7948 goto parse_operands_return;
7949
7950 /* Make sure there is not a comma before the optional operand.
7951 For example the fifth operand of 'sys' is optional:
7952
7953 sys #0,c0,c0,#0, <--- wrong
7954 sys #0,c0,c0,#0 <--- correct. */
7955 if (comma_skipped_p && i && endchar == END_OF_INSN)
7956 {
7957 set_fatal_syntax_error
7958 (_("unexpected comma before the omitted optional operand"));
7959 goto parse_operands_return;
7960 }
7961 }
7962
7963 /* Reaching here means we are dealing with an optional operand that is
7964 omitted from the assembly line. */
7965 gas_assert (optional_operand_p (opcode, i));
7966 info->present = 0;
7967 process_omitted_operand (operands[i], opcode, i, info);
7968
7969 /* Try again, skipping the optional operand at backtrack_pos. */
7970 str = backtrack_pos;
7971 backtrack_pos = 0;
7972
7973 /* Clear any error record after the omitted optional operand has been
7974 successfully handled. */
7975 clear_error ();
7976 }
7977
7978 /* Check if we have parsed all the operands. */
7979 if (*str != '\0' && ! error_p ())
7980 {
7981 /* Set I to the index of the last present operand; this is
7982 for the purpose of diagnostics. */
7983 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7984 ;
7985 set_fatal_syntax_error
7986 (_("unexpected characters following instruction"));
7987 }
7988
7989 parse_operands_return:
7990
7991 if (error_p ())
7992 {
7993 inst.parsing_error.index = i;
7994 DEBUG_TRACE ("parsing FAIL: %s - %s",
7995 operand_mismatch_kind_names[inst.parsing_error.kind],
7996 inst.parsing_error.error);
7997 /* Record the operand error properly; this is useful when there
7998 are multiple instruction templates for a mnemonic name, so that
7999 later on, we can select the error that most closely describes
8000 the problem. */
8001 record_operand_error_info (opcode, &inst.parsing_error);
8002 return false;
8003 }
8004 else
8005 {
8006 DEBUG_TRACE ("parsing SUCCESS");
8007 return true;
8008 }
8009 }
8010
8011 /* It does some fix-up to provide some programmer friendly feature while
8012 keeping the libopcodes happy, i.e. libopcodes only accepts
8013 the preferred architectural syntax.
8014 Return FALSE if there is any failure; otherwise return TRUE. */
8015
static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  /* Each case below rewrites the already-parsed operands into the single
     form that libopcodes will accept, or reports an error and returns
     false.  Falling through to the end means no fix-up was needed.  */
  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register was written: the bit number must then fit in
	     0..31.  Internally the instruction is always the X form, so
	     upgrade the qualifier after validating the immediate.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  /* Pool slot size follows the destination register size, except
	     LDRSW which always loads a 32-bit word.  */
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	/* Silently narrow Xd to Wd; the result is identical since the
	   upper 32 bits are zeroed either way.  */
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	/* The extended-register operand is the 2nd or 3rd operand in all
	   addsub_ext templates.  */
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
8118
8119 /* Check for loads and stores that will cause unpredictable behavior. */
8120
static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  /* NOTE(review): in these encodings bit 22 of the opcode appears to be
     the load flag and bit 21 the pair/second-register flag (see the
     comments at each use below) -- confirm against the opcode tables.  */
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.
	 Operand 0 is the transfer register, operand 1 the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 Operands 0 and 1 are the register pair, operand 2 the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-exclusive forms: operand 0 is the status register Rs,
		 operand 1 the transfer register Rt.  */
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
8214
8215 static void
8216 force_automatic_sequence_close (void)
8217 {
8218 struct aarch64_segment_info_type *tc_seg_info;
8219
8220 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
8221 if (tc_seg_info->insn_sequence.instr)
8222 {
8223 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
8224 _("previous `%s' sequence has not been closed"),
8225 tc_seg_info->insn_sequence.instr->opcode->name);
8226 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
8227 }
8228 }
8229
8230 /* A wrapper function to interface with libopcodes on encoding and
8231 record the error message if there is any.
8232
8233 Return TRUE on success; otherwise return FALSE. */
8234
8235 static bool
8236 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
8237 aarch64_insn *code)
8238 {
8239 aarch64_operand_error error_info;
8240 memset (&error_info, '\0', sizeof (error_info));
8241 error_info.kind = AARCH64_OPDE_NIL;
8242 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
8243 && !error_info.non_fatal)
8244 return true;
8245
8246 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
8247 record_operand_error_info (opcode, &error_info);
8248 return error_info.non_fatal;
8249 }
8250
8251 #ifdef DEBUG_AARCH64
8252 static inline void
8253 dump_opcode_operands (const aarch64_opcode *opcode)
8254 {
8255 int i = 0;
8256 while (opcode->operands[i] != AARCH64_OPND_NIL)
8257 {
8258 aarch64_verbose ("\t\t opnd%d: %s", i,
8259 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
8260 ? aarch64_get_operand_name (opcode->operands[i])
8261 : aarch64_get_operand_desc (opcode->operands[i]));
8262 ++i;
8263 }
8264 }
8265 #endif /* DEBUG_AARCH64 */
8266
8267 /* This is the guts of the machine-dependent assembler. STR points to a
8268 machine dependent instruction. This function is supposed to emit
8269 the frags/bytes it assembles to. */
8270
void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed: re-anchor the most recent label
     (recorded by aarch64_frob_label) to the current frag and offset so
     that it labels this instruction.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment, and remember
     where this instruction came from for later diagnostics.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  Remember the first '.' for condition-code
     style suffix lookup.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* Give create_register_alias a chance to claim the line first;
     register aliases have no '.' in the name.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed from the mnemonic suffix across the
     per-template reset of INST.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode against this template; on success the
	 instruction (or a copy, if a relocation still needs it) is
	 emitted and we are done.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!aarch64_cpu_supports_inst_p (cpu_variant, inst_base))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template did not match; reset and try the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8421
8422 /* Various frobbings of labels and their addresses. */
8423
void
aarch64_start_line_hook (void)
{
  /* Reset the record of the most recently seen label at the start of
     each new input line.  */
  last_label_seen = NULL;
}
8429
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember SYM so that md_assemble can re-anchor it to the frag of
     the next instruction assembled.  */
  last_label_seen = sym;

  /* Also emit DWARF debug-line information for the label.  */
  dwarf2_emit_label (sym);
}
8437
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  (NOTE(review): this looks
     like the per-section hook; SEC itself is unused -- the check is
     against the current segment.)  */
  force_automatic_sequence_close ();
}
8444
8445 int
8446 aarch64_data_in_code (void)
8447 {
8448 if (startswith (input_line_pointer + 1, "data:"))
8449 {
8450 *input_line_pointer = '/';
8451 input_line_pointer += 5;
8452 *input_line_pointer = 0;
8453 return 1;
8454 }
8455
8456 return 0;
8457 }
8458
/* Strip a trailing "/data" suffix from NAME in place (only when NAME is
   strictly longer than the suffix).  Returns NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = 0;

  return name;
}
8469 \f
8470 /* Table of all register names defined by default. The user can
8471 define additional names with .req. Note that all register names
8472 should appear in both upper and lowercase variants. Some registers
8473 also have mixed-case names. */
8474
/* REGDEF creates a canonical (builtin) entry; REGDEF_ALIAS creates one
   that is not a canonical name (last field false).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET16S(p,s,t) \
  REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
  REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
  REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
  REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
/* REGSET31 covers registers 0..30 only; number 31 is added separately
   (REGSET) or via the dedicated sp/wsp/xzr/wzr entries below.  */
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  x31/w31 are deliberately omitted: register
     number 31 is only reachable through the sp/wsp and xzr/wzr names
     defined below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, ZR_32), REGDEF (WZR, 31, ZR_32),
  REGDEF (xzr, 31, ZR_64), REGDEF (XZR, 31, ZR_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, V), REGSET (V, V),

  /* SVE vector registers.  */
  REGSET (z, Z), REGSET (Z, Z),

  /* SVE predicate(-as-mask) registers.  */
  REGSET16 (p, P), REGSET16 (P, P),

  /* SVE predicate-as-counter registers.  */
  REGSET16 (pn, PN), REGSET16 (PN, PN),

  /* SME ZA.  We model this as a register because it acts syntactically
     like ZA0H, supporting qualifier suffixes and indexing.  */
  REGDEF (za, 0, ZA), REGDEF (ZA, 0, ZA),

  /* SME ZA tile registers.  */
  REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),

  /* SME ZA tile registers (horizontal slice).  */
  REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),

  /* SME ZA tile registers (vertical slice).  */
  REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV),

  /* SME2 ZT0.  */
  REGDEF (zt0, 0, ZT0), REGDEF (ZT0, 0, ZT0)
};
8557
/* The register-name helper macros are only needed for the table above;
   undefine all of them (including REGNUMS and REGSET16S, which were
   previously left defined) so they cannot leak into later code.  */
#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGNUMS
#undef REGSET16
#undef REGSET16S
#undef REGSET31
#undef REGSET
8564
8565 #define N 1
8566 #define n 0
8567 #define Z 1
8568 #define z 0
8569 #define C 1
8570 #define c 0
8571 #define V 1
8572 #define v 0
8573 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
8574 static const asm_nzcv nzcv_names[] = {
8575 {"nzcv", B (n, z, c, v)},
8576 {"nzcV", B (n, z, c, V)},
8577 {"nzCv", B (n, z, C, v)},
8578 {"nzCV", B (n, z, C, V)},
8579 {"nZcv", B (n, Z, c, v)},
8580 {"nZcV", B (n, Z, c, V)},
8581 {"nZCv", B (n, Z, C, v)},
8582 {"nZCV", B (n, Z, C, V)},
8583 {"Nzcv", B (N, z, c, v)},
8584 {"NzcV", B (N, z, c, V)},
8585 {"NzCv", B (N, z, C, v)},
8586 {"NzCV", B (N, z, C, V)},
8587 {"NZcv", B (N, Z, c, v)},
8588 {"NZcV", B (N, Z, c, V)},
8589 {"NZCv", B (N, Z, C, v)},
8590 {"NZCV", B (N, Z, C, V)}
8591 };
8592
8593 #undef N
8594 #undef n
8595 #undef Z
8596 #undef z
8597 #undef C
8598 #undef c
8599 #undef V
8600 #undef v
8601 #undef B
8602 \f
8603 /* MD interface: bits in the object file. */
8604
8605 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8606 for use in the a.out file, and stores them in the array pointed to by buf.
8607 This knows about the endian-ness of the target machine and does
8608 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8609 2 (short) and 4 (long) Floating numbers are put out as a series of
8610 LITTLENUMS (shorts, here at least). */
8611
8612 void
8613 md_number_to_chars (char *buf, valueT val, int n)
8614 {
8615 if (target_big_endian)
8616 number_to_chars_bigendian (buf, val, n);
8617 else
8618 number_to_chars_littleendian (buf, val, n);
8619 }
8620
8621 /* MD interface: Sections. */
8622
8623 /* Estimate the size of a frag before relaxing. Assume everything fits in
8624 4 bytes. */
8625
8626 int
8627 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
8628 {
8629 fragp->fr_var = 4;
8630 return 4;
8631 }
8632
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No target-specific rounding is required; return SIZE unchanged.  */
  return size;
}
8640
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total amount of padding this frag must provide.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach a 4-byte instruction boundary.  These become
     the zero-filled fixed part of the frag.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating (fr_var) part is a single NOP instruction.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8698
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
/* Non-ELF targets need no per-frag initialisation.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Emit the appropriate mapping symbol ($x/$d) for the frag type.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
8744
8745 /* Whether SFrame stack trace info is supported. */
8746
8747 bool
8748 aarch64_support_sframe_p (void)
8749 {
8750 /* At this time, SFrame is supported for aarch64 only. */
8751 return (aarch64_abi == AARCH64_ABI_LP64);
8752 }
8753
/* Specify if RA tracking is needed.  AArch64 always tracks the return
   address register when emitting SFrame info.  */

bool
aarch64_sframe_ra_tracking_p (void)
{
  return true;
}
8761
/* Specify the fixed offset to recover RA from CFA.
   (useful only when RA tracking is not needed).  */

offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  /* RA tracking is always enabled for this target (see
     aarch64_sframe_ra_tracking_p), so report the invalid marker.  */
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8770
8771 /* Get the abi/arch indentifier for SFrame. */
8772
8773 unsigned char
8774 aarch64_sframe_get_abi_arch (void)
8775 {
8776 unsigned char sframe_abi_arch = 0;
8777
8778 if (aarch64_support_sframe_p ())
8779 {
8780 sframe_abi_arch = target_big_endian
8781 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8782 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8783 }
8784
8785 return sframe_abi_arch;
8786 }
8787
8788 #endif /* OBJ_ELF */
8789 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* At function entry the CFA is SP with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8797
8798 /* Convert REGNAME to a DWARF-2 register number. */
8799
8800 int
8801 tc_aarch64_regname_to_dw2regnum (char *regname)
8802 {
8803 const reg_entry *reg = parse_reg (&regname);
8804 if (reg == NULL)
8805 return -1;
8806
8807 switch (reg->type)
8808 {
8809 case REG_TYPE_SP_32:
8810 case REG_TYPE_SP_64:
8811 case REG_TYPE_R_32:
8812 case REG_TYPE_R_64:
8813 return reg->number;
8814
8815 case REG_TYPE_FP_B:
8816 case REG_TYPE_FP_H:
8817 case REG_TYPE_FP_S:
8818 case REG_TYPE_FP_D:
8819 case REG_TYPE_FP_Q:
8820 return reg->number + 64;
8821
8822 default:
8823 break;
8824 }
8825 return -1;
8826 }
8827
8828 /* Implement DWARF2_ADDR_SIZE. */
8829
8830 int
8831 aarch64_dwarf2_addr_size (void)
8832 {
8833 if (ilp32_p)
8834 return 4;
8835 else if (llp64_p)
8836 return 8;
8837 return bfd_arch_bits_per_address (stdoutput) / 8;
8838 }
8839
8840 /* MD interface: Symbol and relocation handling. */
8841
8842 /* Return the address within the segment that a PC-relative fixup is
8843 relative to. For AArch64 PC-relative fixups applied to instructions
8844 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8845
8846 long
8847 md_pcrel_from_section (fixS * fixP, segT seg)
8848 {
8849 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8850
8851 /* If this is pc-relative and we are going to emit a relocation
8852 then we just want to put out any pipeline compensation that the linker
8853 will need. Otherwise we want to use the calculated base. */
8854 if (fixP->fx_pcrel
8855 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8856 || aarch64_force_relocation (fixP)))
8857 base = 0;
8858
8859 /* AArch64 should be consistent for all pc-relative relocations. */
8860 return base + AARCH64_PCREL_OFFSET;
8861 }
8862
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character prefix check before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  /* The GOT symbol must not already exist under another guise.  */
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
8888
8889 /* Return non-zero if the indicated VALUE has overflowed the maximum
8890 range expressible by a unsigned number with the indicated number of
8891 BITS. */
8892
8893 static bool
8894 unsigned_overflow (valueT value, unsigned bits)
8895 {
8896 valueT lim;
8897 if (bits >= sizeof (valueT) * 8)
8898 return false;
8899 lim = (valueT) 1 << bits;
8900 return (value >= lim);
8901 }
8902
8903
8904 /* Return non-zero if the indicated VALUE has overflowed the maximum
8905 range expressible by an signed number with the indicated number of
8906 BITS. */
8907
8908 static bool
8909 signed_overflow (offsetT value, unsigned bits)
8910 {
8911 offsetT lim;
8912 if (bits >= sizeof (offsetT) * 8)
8913 return false;
8914 lim = (offsetT) 1 << (bits - 1);
8915 return (value < -lim || value >= lim);
8916 }
8917
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; OP_NIL
     means no unscaled form exists.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  /* The address operand of every unscaled load/store is operand 1.  */
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure means the offset does
     not fit the signed 9-bit unscaled form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8980
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to load
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversibility an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* None of the single-instruction forms could represent VALUE.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
9041
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov     w0, u32
     .set    u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate; exception instructions place it via
	 encode_svc_imm, UDF takes the raw value.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2  2  2 21111 111111
		  1  098|7654|3  2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0  Rm  imm6  Rn  Rd  ADD
	 2b000000 sf 010|1011|shift 0  Rm  imm6  Rn  Rd  ADDS
	 4b000000 sf 100|1011|shift 0  Rm  imm6  Rn  Rd  SUB
	 6b000000 sf 110|1011|shift 0  Rm  imm6  Rn  Rd  SUBS
	 ->
		  3  322|2222|2 2  221111111111
		  1  098|7654|3 2  109876543210 98765 43210
	 11000000 sf 001|0001|shift    imm12    Rn    Rd    ADD
	 31000000 sf 011|0001|shift    imm12    Rn    Rd    ADDS
	 51000000 sf 101|0001|shift    imm12    Rn    Rd    SUB
	 71000000 sf 111|0001|shift    imm12    Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction so the
	 encoder can validate the bitmask form of VALUE.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate, inserted via encode_movw_imm.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* Fall back to the unscaled LDUR/STUR form when the scaled
	     offset does not encode (negative or unaligned offset).  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
9220
9221 /* Apply a fixup (fixP) to segment data, once it has been determined
9222 by our caller that we have all the info we need to fix it up.
9223
9224 Parameter valP is the pointer to the value of the bits. */
9225
9226 void
9227 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
9228 {
9229 offsetT value = *valP;
9230 uint32_t insn;
9231 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
9232 int scale;
9233 unsigned flags = fixP->fx_addnumber;
9234
9235 DEBUG_TRACE ("\n\n");
9236 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
9237 DEBUG_TRACE ("Enter md_apply_fix");
9238
9239 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
9240
9241 /* Note whether this will delete the relocation. */
9242
9243 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
9244 && aarch64_force_reloc (fixP->fx_r_type) <= 0)
9245 fixP->fx_done = 1;
9246
9247 /* Process the relocations. */
9248 switch (fixP->fx_r_type)
9249 {
9250 case BFD_RELOC_NONE:
9251 /* This will need to go in the object file. */
9252 fixP->fx_done = 0;
9253 break;
9254
9255 case BFD_RELOC_8:
9256 case BFD_RELOC_8_PCREL:
9257 if (fixP->fx_done || !seg->use_rela_p)
9258 md_number_to_chars (buf, value, 1);
9259 break;
9260
9261 case BFD_RELOC_16:
9262 case BFD_RELOC_16_PCREL:
9263 if (fixP->fx_done || !seg->use_rela_p)
9264 md_number_to_chars (buf, value, 2);
9265 break;
9266
9267 case BFD_RELOC_32:
9268 case BFD_RELOC_32_PCREL:
9269 if (fixP->fx_done || !seg->use_rela_p)
9270 md_number_to_chars (buf, value, 4);
9271 break;
9272
9273 case BFD_RELOC_64:
9274 case BFD_RELOC_64_PCREL:
9275 if (fixP->fx_done || !seg->use_rela_p)
9276 md_number_to_chars (buf, value, 8);
9277 break;
9278
9279 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
9280 /* We claim that these fixups have been processed here, even if
9281 in fact we generate an error because we do not have a reloc
9282 for them, so tc_gen_reloc() will reject them. */
9283 fixP->fx_done = 1;
9284 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
9285 {
9286 as_bad_where (fixP->fx_file, fixP->fx_line,
9287 _("undefined symbol %s used as an immediate value"),
9288 S_GET_NAME (fixP->fx_addsy));
9289 goto apply_fix_return;
9290 }
9291 fix_insn (fixP, flags, value);
9292 break;
9293
9294 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
9295 if (fixP->fx_done || !seg->use_rela_p)
9296 {
9297 if (value & 3)
9298 as_bad_where (fixP->fx_file, fixP->fx_line,
9299 _("pc-relative load offset not word aligned"));
9300 if (signed_overflow (value, 21))
9301 as_bad_where (fixP->fx_file, fixP->fx_line,
9302 _("pc-relative load offset out of range"));
9303 insn = get_aarch64_insn (buf);
9304 insn |= encode_ld_lit_ofs_19 (value >> 2);
9305 put_aarch64_insn (buf, insn);
9306 }
9307 break;
9308
9309 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
9310 if (fixP->fx_done || !seg->use_rela_p)
9311 {
9312 if (signed_overflow (value, 21))
9313 as_bad_where (fixP->fx_file, fixP->fx_line,
9314 _("pc-relative address offset out of range"));
9315 insn = get_aarch64_insn (buf);
9316 insn |= encode_adr_imm (value);
9317 put_aarch64_insn (buf, insn);
9318 }
9319 break;
9320
9321 case BFD_RELOC_AARCH64_BRANCH19:
9322 if (fixP->fx_done || !seg->use_rela_p)
9323 {
9324 if (value & 3)
9325 as_bad_where (fixP->fx_file, fixP->fx_line,
9326 _("conditional branch target not word aligned"));
9327 if (signed_overflow (value, 21))
9328 as_bad_where (fixP->fx_file, fixP->fx_line,
9329 _("conditional branch out of range"));
9330 insn = get_aarch64_insn (buf);
9331 insn |= encode_cond_branch_ofs_19 (value >> 2);
9332 put_aarch64_insn (buf, insn);
9333 }
9334 break;
9335
9336 case BFD_RELOC_AARCH64_TSTBR14:
9337 if (fixP->fx_done || !seg->use_rela_p)
9338 {
9339 if (value & 3)
9340 as_bad_where (fixP->fx_file, fixP->fx_line,
9341 _("conditional branch target not word aligned"));
9342 if (signed_overflow (value, 16))
9343 as_bad_where (fixP->fx_file, fixP->fx_line,
9344 _("conditional branch out of range"));
9345 insn = get_aarch64_insn (buf);
9346 insn |= encode_tst_branch_ofs_14 (value >> 2);
9347 put_aarch64_insn (buf, insn);
9348 }
9349 break;
9350
9351 case BFD_RELOC_AARCH64_CALL26:
9352 case BFD_RELOC_AARCH64_JUMP26:
9353 if (fixP->fx_done || !seg->use_rela_p)
9354 {
9355 if (value & 3)
9356 as_bad_where (fixP->fx_file, fixP->fx_line,
9357 _("branch target not word aligned"));
9358 if (signed_overflow (value, 28))
9359 as_bad_where (fixP->fx_file, fixP->fx_line,
9360 _("branch out of range"));
9361 insn = get_aarch64_insn (buf);
9362 insn |= encode_branch_ofs_26 (value >> 2);
9363 put_aarch64_insn (buf, insn);
9364 }
9365 break;
9366
9367 case BFD_RELOC_AARCH64_MOVW_G0:
9368 case BFD_RELOC_AARCH64_MOVW_G0_NC:
9369 case BFD_RELOC_AARCH64_MOVW_G0_S:
9370 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
9371 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
9372 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
9373 scale = 0;
9374 goto movw_common;
9375 case BFD_RELOC_AARCH64_MOVW_G1:
9376 case BFD_RELOC_AARCH64_MOVW_G1_NC:
9377 case BFD_RELOC_AARCH64_MOVW_G1_S:
9378 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9379 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
9380 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
9381 scale = 16;
9382 goto movw_common;
9383 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
9384 scale = 0;
9385 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9386 /* Should always be exported to object file, see
9387 aarch64_force_relocation(). */
9388 gas_assert (!fixP->fx_done);
9389 gas_assert (seg->use_rela_p);
9390 goto movw_common;
9391 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9392 scale = 16;
9393 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9394 /* Should always be exported to object file, see
9395 aarch64_force_relocation(). */
9396 gas_assert (!fixP->fx_done);
9397 gas_assert (seg->use_rela_p);
9398 goto movw_common;
9399 case BFD_RELOC_AARCH64_MOVW_G2:
9400 case BFD_RELOC_AARCH64_MOVW_G2_NC:
9401 case BFD_RELOC_AARCH64_MOVW_G2_S:
9402 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
9403 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
9404 scale = 32;
9405 goto movw_common;
9406 case BFD_RELOC_AARCH64_MOVW_G3:
9407 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
9408 scale = 48;
9409 movw_common:
9410 if (fixP->fx_done || !seg->use_rela_p)
9411 {
9412 insn = get_aarch64_insn (buf);
9413
9414 if (!fixP->fx_done)
9415 {
9416 /* REL signed addend must fit in 16 bits */
9417 if (signed_overflow (value, 16))
9418 as_bad_where (fixP->fx_file, fixP->fx_line,
9419 _("offset out of range"));
9420 }
9421 else
9422 {
9423 /* Check for overflow and scale. */
9424 switch (fixP->fx_r_type)
9425 {
9426 case BFD_RELOC_AARCH64_MOVW_G0:
9427 case BFD_RELOC_AARCH64_MOVW_G1:
9428 case BFD_RELOC_AARCH64_MOVW_G2:
9429 case BFD_RELOC_AARCH64_MOVW_G3:
9430 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9431 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9432 if (unsigned_overflow (value, scale + 16))
9433 as_bad_where (fixP->fx_file, fixP->fx_line,
9434 _("unsigned value out of range"));
9435 break;
9436 case BFD_RELOC_AARCH64_MOVW_G0_S:
9437 case BFD_RELOC_AARCH64_MOVW_G1_S:
9438 case BFD_RELOC_AARCH64_MOVW_G2_S:
9439 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
9440 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
9441 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
9442 /* NOTE: We can only come here with movz or movn. */
9443 if (signed_overflow (value, scale + 16))
9444 as_bad_where (fixP->fx_file, fixP->fx_line,
9445 _("signed value out of range"));
9446 if (value < 0)
9447 {
9448 /* Force use of MOVN. */
9449 value = ~value;
9450 insn = reencode_movzn_to_movn (insn);
9451 }
9452 else
9453 {
9454 /* Force use of MOVZ. */
9455 insn = reencode_movzn_to_movz (insn);
9456 }
9457 break;
9458 default:
9459 /* Unchecked relocations. */
9460 break;
9461 }
9462 value >>= scale;
9463 }
9464
9465 /* Insert value into MOVN/MOVZ/MOVK instruction. */
9466 insn |= encode_movw_imm (value & 0xffff);
9467
9468 put_aarch64_insn (buf, insn);
9469 }
9470 break;
9471
9472 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
9473 fixP->fx_r_type = (ilp32_p
9474 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
9475 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
9476 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9477 /* Should always be exported to object file, see
9478 aarch64_force_relocation(). */
9479 gas_assert (!fixP->fx_done);
9480 gas_assert (seg->use_rela_p);
9481 break;
9482
9483 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
9484 fixP->fx_r_type = (ilp32_p
9485 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
9486 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
9487 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9488 /* Should always be exported to object file, see
9489 aarch64_force_relocation(). */
9490 gas_assert (!fixP->fx_done);
9491 gas_assert (seg->use_rela_p);
9492 break;
9493
9494 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
9495 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
9496 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
9497 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
9498 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
9499 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
9500 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
9501 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
9502 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
9503 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
9504 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
9505 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
9506 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
9507 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
9508 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
9509 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
9510 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
9511 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
9512 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
9513 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
9514 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
9515 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
9516 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
9517 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
9518 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
9519 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
9520 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
9521 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
9522 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
9523 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
9524 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
9525 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
9526 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
9527 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
9528 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
9529 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
9530 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
9531 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
9532 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
9533 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
9534 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
9535 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
9536 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
9537 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
9538 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
9539 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
9540 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
9541 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
9542 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
9543 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
9544 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
9545 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
9546 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9547 /* Should always be exported to object file, see
9548 aarch64_force_relocation(). */
9549 gas_assert (!fixP->fx_done);
9550 gas_assert (seg->use_rela_p);
9551 break;
9552
9553 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
9554 /* Should always be exported to object file, see
9555 aarch64_force_relocation(). */
9556 fixP->fx_r_type = (ilp32_p
9557 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
9558 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
9559 gas_assert (!fixP->fx_done);
9560 gas_assert (seg->use_rela_p);
9561 break;
9562
9563 case BFD_RELOC_AARCH64_ADD_LO12:
9564 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
9565 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
9566 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
9567 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
9568 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
9569 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
9570 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
9571 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
9572 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
9573 case BFD_RELOC_AARCH64_LDST128_LO12:
9574 case BFD_RELOC_AARCH64_LDST16_LO12:
9575 case BFD_RELOC_AARCH64_LDST32_LO12:
9576 case BFD_RELOC_AARCH64_LDST64_LO12:
9577 case BFD_RELOC_AARCH64_LDST8_LO12:
9578 /* Should always be exported to object file, see
9579 aarch64_force_relocation(). */
9580 gas_assert (!fixP->fx_done);
9581 gas_assert (seg->use_rela_p);
9582 break;
9583
9584 case BFD_RELOC_AARCH64_TLSDESC_ADD:
9585 case BFD_RELOC_AARCH64_TLSDESC_CALL:
9586 case BFD_RELOC_AARCH64_TLSDESC_LDR:
9587 break;
9588
9589 case BFD_RELOC_UNUSED:
9590 /* An error will already have been reported. */
9591 break;
9592
9593 case BFD_RELOC_RVA:
9594 case BFD_RELOC_32_SECREL:
9595 case BFD_RELOC_16_SECIDX:
9596 break;
9597
9598 default:
9599 as_bad_where (fixP->fx_file, fixP->fx_line,
9600 _("unexpected %s fixup"),
9601 bfd_get_reloc_code_name (fixP->fx_r_type));
9602 break;
9603 }
9604
9605 apply_fix_return:
9606 /* Free the allocated the struct aarch64_inst.
9607 N.B. currently there are very limited number of fix-up types actually use
9608 this field, so the impact on the performance should be minimal . */
9609 free (fixP->tc_fix_data.inst);
9610
9611 return;
9612 }
9613
9614 /* Translate internal representation of relocation info to BFD target
9615 format. */
9616
9617 arelent *
9618 tc_gen_reloc (asection * section, fixS * fixp)
9619 {
9620 arelent *reloc;
9621 bfd_reloc_code_real_type code;
9622
9623 reloc = XNEW (arelent);
9624
9625 reloc->sym_ptr_ptr = XNEW (asymbol *);
9626 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9627 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9628
9629 if (fixp->fx_pcrel)
9630 {
9631 if (section->use_rela_p)
9632 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9633 else
9634 fixp->fx_offset = reloc->address;
9635 }
9636 reloc->addend = fixp->fx_offset;
9637
9638 code = fixp->fx_r_type;
9639 switch (code)
9640 {
9641 case BFD_RELOC_16:
9642 if (fixp->fx_pcrel)
9643 code = BFD_RELOC_16_PCREL;
9644 break;
9645
9646 case BFD_RELOC_32:
9647 if (fixp->fx_pcrel)
9648 code = BFD_RELOC_32_PCREL;
9649 break;
9650
9651 case BFD_RELOC_64:
9652 if (fixp->fx_pcrel)
9653 code = BFD_RELOC_64_PCREL;
9654 break;
9655
9656 default:
9657 break;
9658 }
9659
9660 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9661 if (reloc->howto == NULL)
9662 {
9663 as_bad_where (fixp->fx_file, fixp->fx_line,
9664 _
9665 ("cannot represent %s relocation in this object file format"),
9666 bfd_get_reloc_code_name (code));
9667 return NULL;
9668 }
9669
9670 return reloc;
9671 }
9672
9673 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9674
void
cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
{
  bfd_reloc_code_real_type type;
  /* Data relocations emitted by directives are never PC-relative.  */
  int pcrel = 0;

#ifdef TE_PE
  /* PE-specific operators map directly onto their own relocations;
     note the brace structure below deliberately spans the #ifdef so
     the switch is shared with the non-PE path.  */
  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_32_SECREL;
    }
  else if (exp->X_op == O_secidx)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_16_SECIDX;
    }
  else
    {
#endif
  /* Pick a reloc.
     FIXME: @@ Should look at CPU word size.  */
  switch (size)
    {
    case 1:
      type = BFD_RELOC_8;
      break;
    case 2:
      type = BFD_RELOC_16;
      break;
    case 4:
      type = BFD_RELOC_32;
      break;
    case 8:
      type = BFD_RELOC_64;
      break;
    default:
      as_bad (_("cannot do %u-byte relocation"), size);
      type = BFD_RELOC_UNUSED;
      break;
    }
#ifdef TE_PE
    }
#endif

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}
9722
9723 /* Implement md_after_parse_args. This is the earliest time we need to decide
9724 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9725
9726 void
9727 aarch64_after_parse_args (void)
9728 {
9729 if (aarch64_abi != AARCH64_ABI_NONE)
9730 return;
9731
9732 #ifdef OBJ_ELF
9733 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9734 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9735 aarch64_abi = AARCH64_ABI_ILP32;
9736 else
9737 aarch64_abi = AARCH64_ABI_LP64;
9738 #else
9739 aarch64_abi = AARCH64_ABI_LLP64;
9740 #endif
9741 }
9742
9743 #ifdef OBJ_ELF
9744 const char *
9745 elf64_aarch64_target_format (void)
9746 {
9747 #ifdef TE_CLOUDABI
9748 /* FIXME: What to do for ilp32_p ? */
9749 if (target_big_endian)
9750 return "elf64-bigaarch64-cloudabi";
9751 else
9752 return "elf64-littleaarch64-cloudabi";
9753 #else
9754 if (target_big_endian)
9755 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9756 else
9757 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9758 #endif
9759 }
9760
/* Symbol-frobbing hook: delegate entirely to the generic ELF
   implementation.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9766 #elif defined OBJ_COFF
/* Return the BFD target name used for PE/COFF AArch64 output.  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9772 #endif
9773
9774 /* MD interface: Finalization. */
9775
9776 /* A good place to do this, although this was probably not intended
9777 for this kind of use. We need to dump the literal pool before
9778 references are made to a null symbol pointer. */
9779
9780 void
9781 aarch64_cleanup (void)
9782 {
9783 literal_pool *pool;
9784
9785 for (pool = list_of_pools; pool; pool = pool->next)
9786 {
9787 /* Put it at the end of the relevant section. */
9788 subseg_set (pool->section, pool->sub_section);
9789 s_ltorg (0);
9790 }
9791 }
9792
9793 #ifdef OBJ_ELF
9794 /* Remove any excess mapping symbols generated for alignment frags in
9795 SEC. We may have created a mapping symbol before a zero byte
9796 alignment; remove it if there's a mapping symbol after the
9797 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits on the boundary between FRAGP and NEXT; scan forward
	 across empty frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9857 #endif
9858
/* Adjust the symbol table.  Called late in assembly, after all input
   has been processed.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9871
/* Insert KEY/VALUE into TABLE.  Passes 0 for str_hash_insert's final
   (replace) argument and ignores its return value, so an existing
   entry for KEY is left in place.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9877
/* Like checked_hash_insert, but for system-register tables: first
   assert that KEY fits within AARCH64_MAX_SYSREG_NAME_LEN.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9884
9885 static void
9886 fill_instruction_hash_table (void)
9887 {
9888 const aarch64_opcode *opcode = aarch64_opcode_table;
9889
9890 while (opcode->name != NULL)
9891 {
9892 templates *templ, *new_templ;
9893 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9894
9895 new_templ = XNEW (templates);
9896 new_templ->opcode = opcode;
9897 new_templ->next = NULL;
9898
9899 if (!templ)
9900 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9901 else
9902 {
9903 new_templ->next = templ->next;
9904 templ->next = new_templ;
9905 }
9906 ++opcode;
9907 }
9908 }
9909
/* Copy at most NUM characters of the NUL-terminated string SRC into
   DST, converting each to upper case, and NUL-terminate the result.
   DST must have room for NUM + 1 bytes.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  /* Use size_t for the counter: the previous "unsigned int" was
     narrower than NUM's type on LP64 hosts, so the comparison mixed
     widths and the counter could wrap before reaching NUM.  */
  size_t i;
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
9918
9919 /* Assume STR point to a lower-case string, allocate, convert and return
9920 the corresponding upper-case string. */
9921 static inline const char*
9922 get_upper_str (const char *str)
9923 {
9924 char *ret;
9925 size_t len = strlen (str);
9926 ret = XNEWVEC (char, len + 1);
9927 convert_to_upper (ret, str, len);
9928 return ret;
9929 }
9930
9931 /* MD interface: Initialization. */
9932
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables used while parsing: instruction mnemonics,
     condition codes, shift/extend modifiers, system registers and the
     various operand keyword sets.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and PSTATE fields, each keyed by name.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  /* Cache-maintenance (IC/DC), address-translation (AT), TLB
     invalidation (TLBI) and speculation-restriction (SR) operands.  */
  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* General register names and NZCV flag-set names.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend modifiers: hashed in both lower and upper case.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if(ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#ifdef OBJ_ELF
  /* FIXME - is there a better way to do it ?  */
  aarch64_sframe_cfa_sp_reg = 31;
  aarch64_sframe_cfa_fp_reg = 29; /* x29.  */
  aarch64_sframe_cfa_ra_reg = 30;
#endif
}
10102
10103 /* Command line processing. */
10104
/* getopt-style short options; "m" takes an argument.  */
const char *md_shortopts = "m:";

/* Define the -EB/-EL option codes only for the endiannesses this
   target configuration supports.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* Target-specific long options.  */
struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
10129
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Simple on/off -m options: when OPTION matches, *VAR is set to
   VALUE.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
10152
struct aarch64_cpu_option_table
{
  const char *name;		/* CPU name accepted by -mcpu=.  */
  const aarch64_feature_set value;	/* Feature set enabled for this CPU.  */
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ALL_FEATURES, NULL},
  {"cortex-a34", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
					 SSBS), "Cortex-A76AE"},
  {"cortex-a77", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
				       SSBS), "Cortex-A77"},
  {"cortex-a65", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
				       SSBS), "Cortex-A65"},
  {"cortex-a65ae", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
					 SSBS), "Cortex-A65AE"},
  {"cortex-a78", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
				       SSBS, PROFILE), "Cortex-A78"},
  {"cortex-a78ae", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
					 SSBS, PROFILE), "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_CPU_FEATURES (V8_2A, 7, DOTPROD, F16, FLAGM,
					PAC, PROFILE, RCPC, SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
					SVE2_BITPERM), "Cortex-A510"},
  {"cortex-a520", AARCH64_CPU_FEATURES (V9_2A, 2, MEMTAG, SVE2_BITPERM),
   "Cortex-A520"},
  {"cortex-a710", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
					SVE2_BITPERM), "Cortex-A710"},
  {"cortex-a720", AARCH64_CPU_FEATURES (V9_2A, 3, MEMTAG, PROFILE,
					SVE2_BITPERM), "Cortex-A720"},
  {"ares", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
				 PROFILE), "Ares"},
  {"exynos-m1", AARCH64_CPU_FEATURES (V8A, 3, CRC, SHA2, AES),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_CPU_FEATURES (V8A, 4, CRC, SHA2, AES, RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
					SSBS), "Neoverse E1"},
  {"neoverse-n1", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
					PROFILE), "Neoverse N1"},
  {"neoverse-n2", AARCH64_CPU_FEATURES (V8_5A, 8, BFLOAT16, I8MM, F16,
					SVE, SVE2, SVE2_BITPERM, MEMTAG,
					RNG), "Neoverse N2"},
  {"neoverse-v1", AARCH64_CPU_FEATURES (V8_4A, 8, PROFILE, CVADP, SVE,
					SSBS, RNG, F16, BFLOAT16, I8MM),
   "Neoverse V1"},
  {"qdf24xx", AARCH64_CPU_FEATURES (V8A, 4, CRC, SHA2, AES, RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_CPU_FEATURES (V8_4A, 3, SHA2, AES, PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_CPU_FEATURES (V8A, 3, CRC, SHA2, AES),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_CPU_FEATURES (V8_1A, 2, SHA2, AES),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_FEATURES (V8A), "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_FEATURES (V8A), "APM X-Gene 1"},
  {"xgene2", AARCH64_CPU_FEATURES (V8A, 1, CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_FEATURES (V8R), "Cortex-R82"},
  {"cortex-x1", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
				      SSBS, PROFILE), "Cortex-X1"},
  {"cortex-x2", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
				      SVE2_BITPERM), "Cortex-X2"},
  {"cortex-x3", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
				      SVE2_BITPERM), "Cortex-X3"},
  {"cortex-x4", AARCH64_CPU_FEATURES (V9_2A, 3, MEMTAG, PROFILE,
				      SVE2_BITPERM), "Cortex-X4"},
  {"generic", AARCH64_ARCH_FEATURES (V8A), NULL},

  {NULL, AARCH64_NO_FEATURES, NULL}
};
10244
struct aarch64_arch_option_table
{
  const char *name;		/* Architecture name, e.g. "armv8.4-a".  */
  const aarch64_feature_set value;	/* Feature set for the architecture.  */
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ALL_FEATURES},
  {"armv8-a", AARCH64_ARCH_FEATURES (V8A)},
  {"armv8.1-a", AARCH64_ARCH_FEATURES (V8_1A)},
  {"armv8.2-a", AARCH64_ARCH_FEATURES (V8_2A)},
  {"armv8.3-a", AARCH64_ARCH_FEATURES (V8_3A)},
  {"armv8.4-a", AARCH64_ARCH_FEATURES (V8_4A)},
  {"armv8.5-a", AARCH64_ARCH_FEATURES (V8_5A)},
  {"armv8.6-a", AARCH64_ARCH_FEATURES (V8_6A)},
  {"armv8.7-a", AARCH64_ARCH_FEATURES (V8_7A)},
  {"armv8.8-a", AARCH64_ARCH_FEATURES (V8_8A)},
  {"armv8.9-a", AARCH64_ARCH_FEATURES (V8_9A)},
  {"armv8-r", AARCH64_ARCH_FEATURES (V8R)},
  {"armv9-a", AARCH64_ARCH_FEATURES (V9A)},
  {"armv9.1-a", AARCH64_ARCH_FEATURES (V9_1A)},
  {"armv9.2-a", AARCH64_ARCH_FEATURES (V9_2A)},
  {"armv9.3-a", AARCH64_ARCH_FEATURES (V9_3A)},
  {"armv9.4-a", AARCH64_ARCH_FEATURES (V9_4A)},
  {NULL, AARCH64_NO_FEATURES}
};
10273
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;		/* Extension name, e.g. "sve2".  */
  const aarch64_feature_set value;	/* Feature bits the extension adds.  */
  const aarch64_feature_set require;	/* Feature dependencies.  */
};

/* Table of "+ext" names; the REQUIRE column drives the transitive
   enable/disable closures computed below.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc", AARCH64_FEATURE (CRC), AARCH64_NO_FEATURES},
  {"crypto", AARCH64_FEATURES (2, AES, SHA2),
   AARCH64_FEATURE (SIMD)},
  {"fp", AARCH64_FEATURE (FP), AARCH64_NO_FEATURES},
  {"lse", AARCH64_FEATURE (LSE), AARCH64_NO_FEATURES},
  {"lse128", AARCH64_FEATURE (LSE128), AARCH64_FEATURE (LSE)},
  {"simd", AARCH64_FEATURE (SIMD), AARCH64_FEATURE (FP)},
  {"pan", AARCH64_FEATURE (PAN), AARCH64_NO_FEATURES},
  {"lor", AARCH64_FEATURE (LOR), AARCH64_NO_FEATURES},
  {"ras", AARCH64_FEATURE (RAS), AARCH64_NO_FEATURES},
  {"rdma", AARCH64_FEATURE (RDMA), AARCH64_FEATURE (SIMD)},
  {"rdm", AARCH64_FEATURE (RDMA), AARCH64_FEATURE (SIMD)},
  {"fp16", AARCH64_FEATURE (F16), AARCH64_FEATURE (FP)},
  {"fp16fml", AARCH64_FEATURE (F16_FML), AARCH64_FEATURE (F16)},
  {"profile", AARCH64_FEATURE (PROFILE), AARCH64_NO_FEATURES},
  {"sve", AARCH64_FEATURE (SVE), AARCH64_FEATURE (COMPNUM)},
  {"tme", AARCH64_FEATURE (TME), AARCH64_NO_FEATURES},
  {"fcma", AARCH64_FEATURE (COMPNUM),
   AARCH64_FEATURES (2, F16, SIMD)},
  {"compnum", AARCH64_FEATURE (COMPNUM),
   AARCH64_FEATURES (2, F16, SIMD)},
  {"jscvt", AARCH64_FEATURE (JSCVT), AARCH64_FEATURE (FP)},
  {"rcpc", AARCH64_FEATURE (RCPC), AARCH64_NO_FEATURES},
  {"rcpc2", AARCH64_FEATURE (RCPC2), AARCH64_FEATURE (RCPC)},
  {"dotprod", AARCH64_FEATURE (DOTPROD), AARCH64_FEATURE (SIMD)},
  {"sha2", AARCH64_FEATURE (SHA2), AARCH64_FEATURE (FP)},
  {"frintts", AARCH64_FEATURE (FRINTTS), AARCH64_FEATURE (SIMD)},
  {"sb", AARCH64_FEATURE (SB), AARCH64_NO_FEATURES},
  {"predres", AARCH64_FEATURE (PREDRES), AARCH64_NO_FEATURES},
  {"predres2", AARCH64_FEATURE (PREDRES2), AARCH64_FEATURE (PREDRES)},
  {"aes", AARCH64_FEATURE (AES), AARCH64_FEATURE (SIMD)},
  {"sm4", AARCH64_FEATURE (SM4), AARCH64_FEATURE (SIMD)},
  {"sha3", AARCH64_FEATURE (SHA3), AARCH64_FEATURE (SHA2)},
  {"rng", AARCH64_FEATURE (RNG), AARCH64_NO_FEATURES},
  {"ssbs", AARCH64_FEATURE (SSBS), AARCH64_NO_FEATURES},
  {"memtag", AARCH64_FEATURE (MEMTAG), AARCH64_NO_FEATURES},
  {"sve2", AARCH64_FEATURE (SVE2), AARCH64_FEATURE (SVE)},
  {"sve2-sm4", AARCH64_FEATURE (SVE2_SM4),
   AARCH64_FEATURES (2, SVE2, SM4)},
  {"sve2-aes", AARCH64_FEATURE (SVE2_AES),
   AARCH64_FEATURES (2, SVE2, AES)},
  {"sve2-sha3", AARCH64_FEATURE (SVE2_SHA3),
   AARCH64_FEATURES (2, SVE2, SHA3)},
  {"sve2-bitperm", AARCH64_FEATURE (SVE2_BITPERM),
   AARCH64_FEATURE (SVE2)},
  {"sme", AARCH64_FEATURE (SME),
   AARCH64_FEATURES (2, SVE2, BFLOAT16)},
  {"sme-f64", AARCH64_FEATURE (SME_F64F64), AARCH64_FEATURE (SME)},
  {"sme-f64f64", AARCH64_FEATURE (SME_F64F64), AARCH64_FEATURE (SME)},
  {"sme-i64", AARCH64_FEATURE (SME_I16I64), AARCH64_FEATURE (SME)},
  {"sme-i16i64", AARCH64_FEATURE (SME_I16I64), AARCH64_FEATURE (SME)},
  {"sme2", AARCH64_FEATURE (SME2), AARCH64_FEATURE (SME)},
  {"bf16", AARCH64_FEATURE (BFLOAT16), AARCH64_FEATURE (FP)},
  {"i8mm", AARCH64_FEATURE (I8MM), AARCH64_FEATURE (SIMD)},
  {"f32mm", AARCH64_FEATURE (F32MM), AARCH64_FEATURE (SVE)},
  {"f64mm", AARCH64_FEATURE (F64MM), AARCH64_FEATURE (SVE)},
  {"ls64", AARCH64_FEATURE (LS64), AARCH64_NO_FEATURES},
  {"flagm", AARCH64_FEATURE (FLAGM), AARCH64_NO_FEATURES},
  {"flagm2", AARCH64_FEATURE (FLAGMANIP), AARCH64_FEATURE (FLAGM)},
  {"pauth", AARCH64_FEATURE (PAC), AARCH64_NO_FEATURES},
  {"xs", AARCH64_FEATURE (XS), AARCH64_NO_FEATURES},
  {"wfxt", AARCH64_FEATURE (WFXT), AARCH64_NO_FEATURES},
  {"mops", AARCH64_FEATURE (MOPS), AARCH64_NO_FEATURES},
  {"hbc", AARCH64_FEATURE (HBC), AARCH64_NO_FEATURES},
  {"cssc", AARCH64_FEATURE (CSSC), AARCH64_NO_FEATURES},
  {"chk", AARCH64_FEATURE (CHK), AARCH64_NO_FEATURES},
  {"gcs", AARCH64_FEATURE (GCS), AARCH64_NO_FEATURES},
  {"the", AARCH64_FEATURE (THE), AARCH64_NO_FEATURES},
  {"rasv2", AARCH64_FEATURE (RASv2), AARCH64_FEATURE (RAS)},
  {"ite", AARCH64_FEATURE (ITE), AARCH64_NO_FEATURES},
  {"d128", AARCH64_FEATURE (D128),
   AARCH64_FEATURE (LSE128)},
  {"b16b16", AARCH64_FEATURE (B16B16), AARCH64_FEATURE (SVE2)},
  {"sme2p1", AARCH64_FEATURE (SME2p1), AARCH64_FEATURE (SME2)},
  {"sve2p1", AARCH64_FEATURE (SVE2p1), AARCH64_FEATURE (SVE2)},
  {NULL, AARCH64_NO_FEATURES, AARCH64_NO_FEATURES},
};
10360
/* Describes a long option whose argument is decoded by FUNC.  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
10368
10369 /* Transitive closure of features depending on set. */
10370 static aarch64_feature_set
10371 aarch64_feature_disable_set (aarch64_feature_set set)
10372 {
10373 const struct aarch64_option_cpu_value_table *opt;
10374 aarch64_feature_set prev = AARCH64_NO_FEATURES;
10375
10376 while (!AARCH64_CPU_HAS_ALL_FEATURES (prev, set))
10377 {
10378 prev = set;
10379 for (opt = aarch64_features; opt->name != NULL; opt++)
10380 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10381 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10382 }
10383 return set;
10384 }
10385
10386 /* Transitive closure of dependencies of set. */
10387 static aarch64_feature_set
10388 aarch64_feature_enable_set (aarch64_feature_set set)
10389 {
10390 const struct aarch64_option_cpu_value_table *opt;
10391 aarch64_feature_set prev = AARCH64_NO_FEATURES;
10392
10393 while (!AARCH64_CPU_HAS_ALL_FEATURES (prev, set))
10394 {
10395 prev = set;
10396 for (opt = aarch64_features; opt->name != NULL; opt++)
10397 if (AARCH64_CPU_HAS_ALL_FEATURES (set, opt->value))
10398 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10399 }
10400 return set;
10401 }
10402
10403 static int
10404 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10405 bool ext_only)
10406 {
10407 /* We insist on extensions being added before being removed. We achieve
10408 this by using the ADDING_VALUE variable to indicate whether we are
10409 adding an extension (1) or removing it (0) and only allowing it to
10410 change in the order -1 -> 1 -> 0. */
10411 int adding_value = -1;
10412 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10413
10414 /* Copy the feature set, so that we can modify it. */
10415 *ext_set = **opt_p;
10416 *opt_p = ext_set;
10417
10418 while (str != NULL && *str != 0)
10419 {
10420 const struct aarch64_option_cpu_value_table *opt;
10421 const char *ext = NULL;
10422 int optlen;
10423
10424 if (!ext_only)
10425 {
10426 if (*str != '+')
10427 {
10428 as_bad (_("invalid architectural extension"));
10429 return 0;
10430 }
10431
10432 ext = strchr (++str, '+');
10433 }
10434
10435 if (ext != NULL)
10436 optlen = ext - str;
10437 else
10438 optlen = strlen (str);
10439
10440 if (optlen >= 2 && startswith (str, "no"))
10441 {
10442 if (adding_value != 0)
10443 adding_value = 0;
10444 optlen -= 2;
10445 str += 2;
10446 }
10447 else if (optlen > 0)
10448 {
10449 if (adding_value == -1)
10450 adding_value = 1;
10451 else if (adding_value != 1)
10452 {
10453 as_bad (_("must specify extensions to add before specifying "
10454 "those to remove"));
10455 return false;
10456 }
10457 }
10458
10459 if (optlen == 0)
10460 {
10461 as_bad (_("missing architectural extension"));
10462 return 0;
10463 }
10464
10465 gas_assert (adding_value != -1);
10466
10467 for (opt = aarch64_features; opt->name != NULL; opt++)
10468 if (optlen == (int) strlen(opt->name)
10469 && strncmp (opt->name, str, optlen) == 0)
10470 {
10471 aarch64_feature_set set;
10472
10473 /* Add or remove the extension. */
10474 if (adding_value)
10475 {
10476 set = aarch64_feature_enable_set (opt->value);
10477 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10478 }
10479 else
10480 {
10481 set = aarch64_feature_disable_set (opt->value);
10482 AARCH64_CLEAR_FEATURES (*ext_set, *ext_set, set);
10483 }
10484 break;
10485 }
10486
10487 if (opt->name == NULL)
10488 {
10489 as_bad (_("unknown architectural extension `%s'"), str);
10490 return 0;
10491 }
10492
10493 str = ext;
10494 };
10495
10496 return 1;
10497 }
10498
10499 static int
10500 aarch64_parse_cpu (const char *str)
10501 {
10502 const struct aarch64_cpu_option_table *opt;
10503 const char *ext = strchr (str, '+');
10504 size_t optlen;
10505
10506 if (ext != NULL)
10507 optlen = ext - str;
10508 else
10509 optlen = strlen (str);
10510
10511 if (optlen == 0)
10512 {
10513 as_bad (_("missing cpu name `%s'"), str);
10514 return 0;
10515 }
10516
10517 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10518 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10519 {
10520 mcpu_cpu_opt = &opt->value;
10521 if (ext != NULL)
10522 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10523
10524 return 1;
10525 }
10526
10527 as_bad (_("unknown cpu `%s'"), str);
10528 return 0;
10529 }
10530
10531 static int
10532 aarch64_parse_arch (const char *str)
10533 {
10534 const struct aarch64_arch_option_table *opt;
10535 const char *ext = strchr (str, '+');
10536 size_t optlen;
10537
10538 if (ext != NULL)
10539 optlen = ext - str;
10540 else
10541 optlen = strlen (str);
10542
10543 if (optlen == 0)
10544 {
10545 as_bad (_("missing architecture name `%s'"), str);
10546 return 0;
10547 }
10548
10549 for (opt = aarch64_archs; opt->name != NULL; opt++)
10550 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10551 {
10552 march_cpu_opt = &opt->value;
10553 if (ext != NULL)
10554 return aarch64_parse_features (ext, &march_cpu_opt, false);
10555
10556 return 1;
10557 }
10558
10559 as_bad (_("unknown architecture `%s'\n"), str);
10560 return 0;
10561 }
10562
/* ABIs.  */

/* Maps an ABI name accepted by -mabi= to its enum value.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* The set of ABIs this target supports: ilp32/lp64 for ELF, llp64
   otherwise (e.g. PE/COFF).  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10578
10579 static int
10580 aarch64_parse_abi (const char *str)
10581 {
10582 unsigned int i;
10583
10584 if (str[0] == '\0')
10585 {
10586 as_bad (_("missing abi name `%s'"), str);
10587 return 0;
10588 }
10589
10590 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10591 if (strcmp (str, aarch64_abis[i].name) == 0)
10592 {
10593 aarch64_abi = aarch64_abis[i].value;
10594 return 1;
10595 }
10596
10597 as_bad (_("unknown abi `%s'\n"), str);
10598 return 0;
10599 }
10600
/* Long command-line options taking an argument: each entry maps an
   option prefix (including the trailing '=') to its parser function.
   Scanned by md_parse_option and printed by md_show_usage.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10610
10611 int
10612 md_parse_option (int c, const char *arg)
10613 {
10614 struct aarch64_option_table *opt;
10615 struct aarch64_long_option_table *lopt;
10616
10617 switch (c)
10618 {
10619 #ifdef OPTION_EB
10620 case OPTION_EB:
10621 target_big_endian = 1;
10622 break;
10623 #endif
10624
10625 #ifdef OPTION_EL
10626 case OPTION_EL:
10627 target_big_endian = 0;
10628 break;
10629 #endif
10630
10631 case 'a':
10632 /* Listing option. Just ignore these, we don't support additional
10633 ones. */
10634 return 0;
10635
10636 default:
10637 for (opt = aarch64_opts; opt->option != NULL; opt++)
10638 {
10639 if (c == opt->option[0]
10640 && ((arg == NULL && opt->option[1] == 0)
10641 || streq (arg, opt->option + 1)))
10642 {
10643 /* If the option is deprecated, tell the user. */
10644 if (opt->deprecated != NULL)
10645 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
10646 arg ? arg : "", _(opt->deprecated));
10647
10648 if (opt->var != NULL)
10649 *opt->var = opt->value;
10650
10651 return 1;
10652 }
10653 }
10654
10655 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10656 {
10657 /* These options are expected to have an argument. */
10658 if (c == lopt->option[0]
10659 && arg != NULL
10660 && startswith (arg, lopt->option + 1))
10661 {
10662 /* If the option is deprecated, tell the user. */
10663 if (lopt->deprecated != NULL)
10664 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
10665 _(lopt->deprecated));
10666
10667 /* Call the sup-option parser. */
10668 return lopt->func (arg + strlen (lopt->option) - 1);
10669 }
10670 }
10671
10672 return 0;
10673 }
10674
10675 return 1;
10676 }
10677
10678 void
10679 md_show_usage (FILE * fp)
10680 {
10681 struct aarch64_option_table *opt;
10682 struct aarch64_long_option_table *lopt;
10683
10684 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10685
10686 for (opt = aarch64_opts; opt->option != NULL; opt++)
10687 if (opt->help != NULL)
10688 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10689
10690 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10691 if (lopt->help != NULL)
10692 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10693
10694 #ifdef OPTION_EB
10695 fprintf (fp, _("\
10696 -EB assemble code for a big-endian cpu\n"));
10697 #endif
10698
10699 #ifdef OPTION_EL
10700 fprintf (fp, _("\
10701 -EL assemble code for a little-endian cpu\n"));
10702 #endif
10703 }
10704
10705 /* Parse a .cpu directive. */
10706
10707 static void
10708 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10709 {
10710 const struct aarch64_cpu_option_table *opt;
10711 char saved_char;
10712 char *name;
10713 char *ext;
10714 size_t optlen;
10715
10716 name = input_line_pointer;
10717 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10718 saved_char = *input_line_pointer;
10719 *input_line_pointer = 0;
10720
10721 ext = strchr (name, '+');
10722
10723 if (ext != NULL)
10724 optlen = ext - name;
10725 else
10726 optlen = strlen (name);
10727
10728 /* Skip the first "all" entry. */
10729 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10730 if (strlen (opt->name) == optlen
10731 && strncmp (name, opt->name, optlen) == 0)
10732 {
10733 mcpu_cpu_opt = &opt->value;
10734 if (ext != NULL)
10735 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10736 return;
10737
10738 cpu_variant = *mcpu_cpu_opt;
10739
10740 *input_line_pointer = saved_char;
10741 demand_empty_rest_of_line ();
10742 return;
10743 }
10744 as_bad (_("unknown cpu `%s'"), name);
10745 *input_line_pointer = saved_char;
10746 ignore_rest_of_line ();
10747 }
10748
10749
10750 /* Parse a .arch directive. */
10751
10752 static void
10753 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10754 {
10755 const struct aarch64_arch_option_table *opt;
10756 char saved_char;
10757 char *name;
10758 char *ext;
10759 size_t optlen;
10760
10761 name = input_line_pointer;
10762 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10763 saved_char = *input_line_pointer;
10764 *input_line_pointer = 0;
10765
10766 ext = strchr (name, '+');
10767
10768 if (ext != NULL)
10769 optlen = ext - name;
10770 else
10771 optlen = strlen (name);
10772
10773 /* Skip the first "all" entry. */
10774 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10775 if (strlen (opt->name) == optlen
10776 && strncmp (name, opt->name, optlen) == 0)
10777 {
10778 mcpu_cpu_opt = &opt->value;
10779 if (ext != NULL)
10780 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10781 return;
10782
10783 cpu_variant = *mcpu_cpu_opt;
10784
10785 *input_line_pointer = saved_char;
10786 demand_empty_rest_of_line ();
10787 return;
10788 }
10789
10790 as_bad (_("unknown architecture `%s'\n"), name);
10791 *input_line_pointer = saved_char;
10792 ignore_rest_of_line ();
10793 }
10794
10795 /* Parse a .arch_extension directive. */
10796
10797 static void
10798 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10799 {
10800 char saved_char;
10801 char *ext = input_line_pointer;
10802
10803 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10804 saved_char = *input_line_pointer;
10805 *input_line_pointer = 0;
10806
10807 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10808 return;
10809
10810 cpu_variant = *mcpu_cpu_opt;
10811
10812 *input_line_pointer = saved_char;
10813 demand_empty_rest_of_line ();
10814 }
10815
/* Copy symbol information.  */

/* Propagate the AArch64-specific symbol flags from SRC to DEST
   (used when one symbol is defined as an alias of another).  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10823
10824 #ifdef OBJ_ELF
10825 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10826 This is needed so AArch64 specific st_other values can be independently
10827 specified for an IFUNC resolver (that is called by the dynamic linker)
10828 and the symbol it resolves (aliased to the resolver). In particular,
10829 if a function symbol has special st_other value set via directives,
10830 then attaching an IFUNC resolver to that symbol should not override
10831 the st_other setting. Requiring the directive on the IFUNC resolver
10832 symbol would be unexpected and problematic in C code, where the two
10833 symbols appear as two independent function declarations. */
10834
10835 void
10836 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10837 {
10838 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10839 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10840 /* If size is unset, copy size from src. Because we don't track whether
10841 .size has been used, we can't differentiate .size dest, 0 from the case
10842 where dest's size is unset. */
10843 if (!destelf->size && S_GET_SIZE (dest) == 0)
10844 {
10845 if (srcelf->size)
10846 {
10847 destelf->size = XNEW (expressionS);
10848 *destelf->size = *srcelf->size;
10849 }
10850 S_SET_SIZE (dest, S_GET_SIZE (src));
10851 }
10852 }
10853 #endif