]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
aarch64: Commonise index parsing
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 /* Types of processor to assemble for. */
42 #ifndef CPU_DEFAULT
43 #define CPU_DEFAULT AARCH64_ARCH_V8
44 #endif
45
46 #define streq(a, b) (strcmp (a, b) == 0)
47
48 #define END_OF_INSN '\0'
49
50 static aarch64_feature_set cpu_variant;
51
52 /* Variables that we set while parsing command-line options. Once all
53 options have been read we re-process these values to set the real
54 assembly flags. */
55 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
56 static const aarch64_feature_set *march_cpu_opt = NULL;
57
58 /* Constants for known architecture features. */
59 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
60
61 /* Currently active instruction sequence. */
62 static aarch64_instr_sequence *insn_sequence = NULL;
63
64 #ifdef OBJ_ELF
65 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
66 static symbolS *GOT_symbol;
67 #endif
68
69 /* Which ABI to use. */
70 enum aarch64_abi_type
71 {
72 AARCH64_ABI_NONE = 0,
73 AARCH64_ABI_LP64 = 1,
74 AARCH64_ABI_ILP32 = 2,
75 AARCH64_ABI_LLP64 = 3
76 };
77
78 unsigned int aarch64_sframe_cfa_sp_reg;
79 /* The other CFA base register for SFrame stack trace info. */
80 unsigned int aarch64_sframe_cfa_fp_reg;
81 unsigned int aarch64_sframe_cfa_ra_reg;
82
83 #ifndef DEFAULT_ARCH
84 #define DEFAULT_ARCH "aarch64"
85 #endif
86
87 #ifdef OBJ_ELF
88 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
89 static const char *default_arch = DEFAULT_ARCH;
90 #endif
91
92 /* AArch64 ABI for the output file. */
93 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
94
95 /* When non-zero, program to a 32-bit model, in which the C data types
96 int, long and all pointer types are 32-bit objects (ILP32); or to a
97 64-bit model, in which the C int type is 32-bits but the C long type
98 and all pointer types are 64-bit objects (LP64). */
99 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
100
101 /* When non zero, C types int and long are 32 bit,
102 pointers, however are 64 bit */
103 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
104
/* Classification of the suffix on a vector register: an element type
   (.b/.h/.s/.d/.q) or an SVE predication qualifier (/z, /m).  */
enum vector_el_type
{
  NT_invtype = -1,
  NT_b,
  NT_h,
  NT_s,
  NT_d,
  NT_q,
  NT_zero,   /* "/z" zeroing predication.  */
  NT_merge   /* "/m" merging predication.  */
};
116
117 /* Bits for DEFINED field in vector_type_el. */
118 #define NTA_HASTYPE 1
119 #define NTA_HASINDEX 2
120 #define NTA_HASVARWIDTH 4
121
struct vector_type_el
{
  /* Element type (NT_b..NT_q) or predication marker (NT_zero/NT_merge).  */
  enum vector_el_type type;
  /* Mask of NTA_HASTYPE, NTA_HASINDEX and NTA_HASVARWIDTH bits that say
     which of the fields below carry valid information.  */
  unsigned char defined;
  /* Element size in bits (8 for ".b" up to 128 for ".q").  */
  unsigned element_size;
  /* Number of elements in the arrangement, e.g. 4 for ".4s"; 0 when no
     integer width was written.  */
  unsigned width;
  /* Element index from a trailing "[imm]", valid when NTA_HASINDEX
     is set in DEFINED.  */
  int64_t index;
};
130
131 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
132
/* Relocation/fixup state for the instruction currently being parsed.  */
struct reloc
{
  /* BFD relocation code selected for the fixup.  */
  bfd_reloc_code_real_type type;
  /* The expression the relocation applies to.  */
  expressionS exp;
  /* Whether the fixup is PC-relative.  */
  int pc_rel;
  /* The operand the relocation belongs to.  */
  enum aarch64_opnd opnd;
  /* FIXUP_F_* flags (e.g. FIXUP_F_HAS_EXPLICIT_SHIFT).  */
  uint32_t flags;
  /* NOTE(review): name suggests libopcodes must be consulted when applying
     the fixup — confirm at the fixup-application sites.  */
  unsigned need_libopcodes_p : 1;
};
142
/* Everything the assembler accumulates for a single assembly line:
   the decoded instruction, parse diagnostics and relocation state.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  aarch64_operand_error parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};
156
157 typedef struct aarch64_instruction aarch64_instruction;
158
159 static aarch64_instruction inst;
160
161 static bool parse_operands (char *, const aarch64_opcode *);
162 static bool programmer_friendly_fixup (aarch64_instruction *);
163
164 /* Diagnostics inline function utilities.
165
166 These are lightweight utilities which should only be called by parse_operands
167 and other parsers. GAS processes each assembly line by parsing it against
168 instruction template(s), in the case of multiple templates (for the same
169 mnemonic name), those templates are tried one by one until one succeeds or
170 all fail. An assembly line may fail a few templates before being
171 successfully parsed; an error saved here in most cases is not a user error
172 but an error indicating the current template is not the right template.
173 Therefore it is very important that errors can be saved at a low cost during
174 the parsing; we don't want to slow down the whole parsing by recording
175 non-user errors in detail.
176
177 Remember that the objective is to help GAS pick up the most appropriate
178 error message in the case of multiple templates, e.g. FMOV which has 8
179 templates. */
180
181 static inline void
182 clear_error (void)
183 {
184 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
185 inst.parsing_error.kind = AARCH64_OPDE_NIL;
186 }
187
188 static inline bool
189 error_p (void)
190 {
191 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
192 }
193
194 static inline void
195 set_error (enum aarch64_operand_error_kind kind, const char *error)
196 {
197 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
198 inst.parsing_error.index = -1;
199 inst.parsing_error.kind = kind;
200 inst.parsing_error.error = error;
201 }
202
/* Record ERROR as an AARCH64_OPDE_RECOVERABLE parse error.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
208
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message.  The NULL error string is what tells the diagnostic
   code to fall back to that generic description.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
216
/* Record ERROR as a syntax error, overwriting any earlier diagnostic.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
222
223 static inline void
224 set_first_syntax_error (const char *error)
225 {
226 if (! error_p ())
227 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
228 }
229
/* Record ERROR as an AARCH64_OPDE_FATAL_SYNTAX_ERROR.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
235 \f
236 /* Return value for certain parsers when the parsing fails; those parsers
237 return the information of the parsed result, e.g. register number, on
238 success. */
239 #define PARSE_FAIL -1
240
241 /* This is an invalid condition code that means no conditional field is
242 present. */
243 #define COND_ALWAYS 0x10
244
/* Association between a textual template and its encoded NZCV value.  */
typedef struct
{
  const char *template;
  uint32_t value;
} asm_nzcv;
250
/* Association between a relocation name and its BFD reloc code.  */
struct reloc_entry
{
  char *name;
  bfd_reloc_code_real_type reloc;
};
256
257 /* Macros to define the register types and masks for the purpose
258 of parsing. */
259
260 #undef AARCH64_REG_TYPES
261 #define AARCH64_REG_TYPES \
262 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
263 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
264 BASIC_REG_TYPE(SP_32) /* wsp */ \
265 BASIC_REG_TYPE(SP_64) /* sp */ \
266 BASIC_REG_TYPE(Z_32) /* wzr */ \
267 BASIC_REG_TYPE(Z_64) /* xzr */ \
268 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
269 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
270 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
271 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
272 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
273 BASIC_REG_TYPE(VN) /* v[0-31] */ \
274 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
275 BASIC_REG_TYPE(PN) /* p[0-15] */ \
276 BASIC_REG_TYPE(ZA) /* za */ \
277 BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
278 BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
279 BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
280 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
281 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
282 /* Typecheck: same, plus SVE registers. */ \
283 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
284 | REG_TYPE(ZN)) \
285 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
286 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
287 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
288 /* Typecheck: same, plus SVE registers. */ \
289 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
290 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
291 | REG_TYPE(ZN)) \
292 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
293 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
294 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
295 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
296 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
298 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
299 /* Typecheck: any [BHSDQ]P FP. */ \
300 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
301 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
302 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
303 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
305 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
306 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
307 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
308 be used for SVE instructions, since Zn and Pn are valid symbols \
309 in other contexts. */ \
310 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
311 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
312 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
313 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
314 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
315 | REG_TYPE(ZN) | REG_TYPE(PN)) \
316 /* Any integer register; used for error messages only. */ \
317 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
318 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
319 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
320 /* The whole of ZA or a single tile. */ \
321 MULTI_REG_TYPE(ZA_ZAT, REG_TYPE(ZA) | REG_TYPE(ZAT)) \
322 /* A horizontal or vertical slice of a ZA tile. */ \
323 MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV)) \
324 /* Pseudo type to mark the end of the enumerator sequence. */ \
325 BASIC_REG_TYPE(MAX)
326
327 #undef BASIC_REG_TYPE
328 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
329 #undef MULTI_REG_TYPE
330 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
331
332 /* Register type enumerators. */
333 typedef enum aarch64_reg_type_
334 {
335 /* A list of REG_TYPE_*. */
336 AARCH64_REG_TYPES
337 } aarch64_reg_type;
338
339 #undef BASIC_REG_TYPE
340 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
341 #undef REG_TYPE
342 #define REG_TYPE(T) (1 << REG_TYPE_##T)
343 #undef MULTI_REG_TYPE
344 #define MULTI_REG_TYPE(T,V) V,
345
/* Structure for a hash table entry for a register.  */
typedef struct
{
  /* Register name as written in assembly.  */
  const char *name;
  /* Register number within its class.  */
  unsigned char number;
  /* One of the basic REG_TYPE_* enumerators.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;
  /* NOTE(review): presumably non-zero for predefined (as opposed to
     user-defined) registers — confirm where entries are created.  */
  unsigned char builtin;
} reg_entry;
354
355 /* Values indexed by aarch64_reg_type to assist the type checking. */
356 static const unsigned reg_type_masks[] =
357 {
358 AARCH64_REG_TYPES
359 };
360
361 #undef BASIC_REG_TYPE
362 #undef REG_TYPE
363 #undef MULTI_REG_TYPE
364 #undef AARCH64_REG_TYPES
365
366 /* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be synchronized with aarch64_reg_type definitions
368 above. */
static const char *
get_reg_expected_msg (aarch64_reg_type reg_type)
{
  const char *msg;

  /* The strings are wrapped in N_ () so they can be translated lazily by
     the caller.  */
  switch (reg_type)
    {
    case REG_TYPE_R_32:
      msg = N_("integer 32-bit register expected");
      break;
    case REG_TYPE_R_64:
      msg = N_("integer 64-bit register expected");
      break;
    case REG_TYPE_R_N:
      msg = N_("integer register expected");
      break;
    case REG_TYPE_R64_SP:
      msg = N_("64-bit integer or SP register expected");
      break;
    case REG_TYPE_SVE_BASE:
      msg = N_("base register expected");
      break;
    case REG_TYPE_R_Z:
      msg = N_("integer or zero register expected");
      break;
    case REG_TYPE_SVE_OFFSET:
      msg = N_("offset register expected");
      break;
    case REG_TYPE_R_SP:
      msg = N_("integer or SP register expected");
      break;
    case REG_TYPE_R_Z_SP:
      msg = N_("integer, zero or SP register expected");
      break;
    case REG_TYPE_FP_B:
      msg = N_("8-bit SIMD scalar register expected");
      break;
    case REG_TYPE_FP_H:
      msg = N_("16-bit SIMD scalar or floating-point half precision "
	       "register expected");
      break;
    case REG_TYPE_FP_S:
      msg = N_("32-bit SIMD scalar or floating-point single precision "
	       "register expected");
      break;
    case REG_TYPE_FP_D:
      msg = N_("64-bit SIMD scalar or floating-point double precision "
	       "register expected");
      break;
    case REG_TYPE_FP_Q:
      msg = N_("128-bit SIMD scalar or floating-point quad precision "
	       "register expected");
      break;
    case REG_TYPE_R_Z_BHSDQ_V:
    case REG_TYPE_R_Z_SP_BHSDQ_VZP:
      msg = N_("register expected");
      break;
    case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
      msg = N_("SIMD scalar or floating-point register expected");
      break;
    case REG_TYPE_VN:		/* any V reg  */
      msg = N_("vector register expected");
      break;
    case REG_TYPE_ZN:
      msg = N_("SVE vector register expected");
      break;
    case REG_TYPE_PN:
      msg = N_("SVE predicate register expected");
      break;
    default:
      /* Any other type is an internal inconsistency between this table
	 and the aarch64_reg_type definitions above.  */
      as_fatal (_("invalid register type %d"), reg_type);
    }
  return msg;
}
443
444 /* Some well known registers that we refer to directly elsewhere. */
445 #define REG_SP 31
446 #define REG_ZR 31
447
448 /* Instructions take 4 bytes in the object file. */
449 #define INSN_SIZE 4
450
451 static htab_t aarch64_ops_hsh;
452 static htab_t aarch64_cond_hsh;
453 static htab_t aarch64_shift_hsh;
454 static htab_t aarch64_sys_regs_hsh;
455 static htab_t aarch64_pstatefield_hsh;
456 static htab_t aarch64_sys_regs_ic_hsh;
457 static htab_t aarch64_sys_regs_dc_hsh;
458 static htab_t aarch64_sys_regs_at_hsh;
459 static htab_t aarch64_sys_regs_tlbi_hsh;
460 static htab_t aarch64_sys_regs_sr_hsh;
461 static htab_t aarch64_reg_hsh;
462 static htab_t aarch64_barrier_opt_hsh;
463 static htab_t aarch64_nzcv_hsh;
464 static htab_t aarch64_pldop_hsh;
465 static htab_t aarch64_hint_opt_hsh;
466
467 /* Stuff needed to resolve the label ambiguity
468 As:
469 ...
470 label: <insn>
471 may differ from:
472 ...
473 label:
474 <insn> */
475
476 static symbolS *last_label_seen;
477
478 /* Literal pool structure. Held on a per-section
479 and per-sub-section basis. */
480
481 #define MAX_LITERAL_POOL_SIZE 1024
/* A single entry in a literal pool.  */
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;
488
/* A pool of literals for one (section, sub-section) pair; pools are
   chained together through NEXT (see list_of_pools below).  */
typedef struct literal_pool
{
  /* The pending literal entries.  */
  literal_expression literals[MAX_LITERAL_POOL_SIZE];
  /* Index of the first unused slot in LITERALS.  */
  unsigned int next_free_entry;
  /* Pool identification number.  */
  unsigned int id;
  /* NOTE(review): presumably the label placed at the pool — confirm at
     the pool-emission code.  */
  symbolS *symbol;
  /* Section and sub-section the pool belongs to.  */
  segT section;
  subsegT sub_section;
  /* NOTE(review): looks like the per-entry size in bytes — confirm.  */
  int size;
  struct literal_pool *next;
} literal_pool;
500
501 /* Pointer to a linked list of literal pools. */
502 static literal_pool *list_of_pools = NULL;
503 \f
504 /* Pure syntax. */
505
506 /* This array holds the chars that always start a comment. If the
507 pre-processor is disabled, these aren't very useful. */
508 const char comment_chars[] = "";
509
510 /* This array holds the chars that only start a comment at the beginning of
511 a line. If the line seems to have the form '# 123 filename'
512 .line and .file directives will appear in the pre-processed output. */
513 /* Note that input_file.c hand checks for '#' at the beginning of the
514 first line of the input file. This is because the compiler outputs
515 #NO_APP at the beginning of its output. */
516 /* Also note that comments like this one will always work. */
517 const char line_comment_chars[] = "#";
518
519 const char line_separator_chars[] = ";";
520
521 /* Chars that can be used to separate mant
522 from exp in floating point numbers. */
523 const char EXP_CHARS[] = "eE";
524
525 /* Chars that mean this number is a floating point constant. */
526 /* As in 0f12.456 */
527 /* or 0d1.2345e12 */
528
529 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
530
531 /* Prefix character that indicates the start of an immediate value. */
532 #define is_immediate_prefix(C) ((C) == '#')
533
534 /* Separator character handling. */
535
536 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
537
/* If *STR points at character C, advance *STR past it and return true;
   otherwise leave *STR unchanged and return false.  */

static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;

  ++*str;
  return true;
}
549
550 #define skip_past_comma(str) skip_past_char (str, ',')
551
552 /* Arithmetic expressions (possibly involving symbols). */
553
554 static bool in_aarch64_get_expression = false;
555
556 /* Third argument to aarch64_get_expression. */
557 #define GE_NO_PREFIX false
558 #define GE_OPT_PREFIX true
559
560 /* Fourth argument to aarch64_get_expression. */
561 #define ALLOW_ABSENT false
562 #define REJECT_ABSENT true
563
564 /* Return TRUE if the string pointed by *STR is successfully parsed
565 as an valid expression; *EP will be filled with the information of
566 such an expression. Otherwise return FALSE.
567
568 If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.  */
570
static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () reads from input_line_pointer, so point it at *STR for
     the duration of the call and restore it on every exit path.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Tell md_operand () that any ill-formed operand it sees comes from this
     call, so that it marks the expression O_illegal.  */
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand ().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#'-prefixed operand must be an expression, so its failure is
	 fatal; otherwise record the error only if it is the first one.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
630
631 /* Turn a string in input_line_pointer into a floating point constant
632 of type TYPE, and store the appropriate bytes in *LITP. The number
633 of LITTLENUMS emitted is stored in *SIZEP. An error message is
634 returned, or NULL on OK. */
635
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Delegate to the shared IEEE helper, honouring target endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
641
642 /* We handle all bad expressions here, so that we can report the faulty
643 instruction in the error message. */
void
md_operand (expressionS * exp)
{
  /* Only intervene for expressions parsed via aarch64_get_expression;
     marking the result O_illegal lets that function report the error
     against the faulty instruction.  */
  if (in_aarch64_get_expression)
    exp->X_op = O_illegal;
}
650
651 /* Immediate values. */
652
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is
   set will be the most meaningful.  Avoid overwriting it with later
   (cascading) errors by calling this function.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;

  set_syntax_error (error);
}
664
/* Similar to first_error, but this function accepts a formatted error
   message; like first_error, it is a no-op if an error has already been
   recorded.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The buffer must be large enough for every caller's message.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
689
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Records a diagnostic and returns
   AARCH64_OPND_QLF_NIL on failure.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base (smallest-width) vector qualifier for each element type,
     indexed by vector_el_type.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predication suffixes map directly to their qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  The S_[BHSDQ] qualifiers parallel the
	 NT_[bhsdq] enumerators, so TYPE is a direct offset.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 4-, 8- and 16-byte total vector sizes are representable.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
764
765 /* Register parsing. */
766
767 /* Generic register parser which is called by other specialized
768 register parsers.
769 CCP points to what should be the beginning of a register name.
770 If it is indeed a valid register name, advance CCP over it and
771 return the reg_entry structure; otherwise return NULL.
772 It does not issue diagnostics. */
773
774 static reg_entry *
775 parse_reg (char **ccp)
776 {
777 char *start = *ccp;
778 char *p;
779 reg_entry *reg;
780
781 #ifdef REGISTER_PREFIX
782 if (*start != REGISTER_PREFIX)
783 return NULL;
784 start++;
785 #endif
786
787 p = start;
788 if (!ISALPHA (*p) || !is_name_beginner (*p))
789 return NULL;
790
791 do
792 p++;
793 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
794
795 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
796
797 if (!reg)
798 return NULL;
799
800 *ccp = p;
801 return reg;
802 }
803
804 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
805 return FALSE. */
806 static bool
807 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
808 {
809 return (reg_type_masks[type] & (1 << reg->type)) != 0;
810 }
811
812 /* Try to parse a base or offset register. Allow SVE base and offset
813 registers if REG_TYPE includes SVE registers. Return the register
814 entry on success, setting *QUALIFIER to the register qualifier.
815 Return null otherwise.
816
817 Note that this function does not issue any diagnostics. */
818
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit GPR, WSP or WZR.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit GPR, SP or XZR.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* SVE registers are only accepted when REG_TYPE allows them, and
	 they must carry an explicit ".s" or ".d" suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
869
870 /* Try to parse a base or offset register. Return the register entry
871 on success, setting *QUALIFIER to the register qualifier. Return null
872 otherwise.
873
874 Note that this function does not issue any diagnostics. */
875
static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept any integer register, including SP and the zero registers.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
881
882 /* Parse the qualifier of a vector register or vector element of type
883 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
884 succeeds; otherwise return FALSE.
885
886 Accept only one occurrence of:
887 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
888 b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* Only plain Vn registers take an integer element count; for all other
     register types the suffix is a bare element size such as ".s".  */
  if (reg_type != REG_TYPE_VN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* On Vn registers only ".1q" is a valid 'q' arrangement; for other
	 register types a bare "q" (width 0) is accepted.  */
      if (reg_type != REG_TYPE_VN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* Accept 64-bit and 128-bit arrangements, plus the short 2h and 4b
     forms.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;
  parsed_type->element_size = element_size;

  *str = ptr;

  return true;
}
968
969 /* *STR contains an SVE zero/merge predication suffix. Parse it into
970 *PARSED_TYPE and point *STR at the end of the suffix. */
971
972 static bool
973 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
974 {
975 char *ptr = *str;
976
977 /* Skip '/'. */
978 gas_assert (*ptr == '/');
979 ptr++;
980 switch (TOLOWER (*ptr))
981 {
982 case 'z':
983 parsed_type->type = NT_zero;
984 break;
985 case 'm':
986 parsed_type->type = NT_merge;
987 break;
988 default:
989 if (*ptr != '\0' && *ptr != ',')
990 first_error_fmt (_("unexpected character `%c' in predication type"),
991 *ptr);
992 else
993 first_error (_("missing predication type"));
994 return false;
995 }
996 parsed_type->width = 0;
997 *str = ptr + 1;
998 return true;
999 }
1000
1001 /* Return true if CH is a valid suffix character for registers of
1002 type TYPE. */
1003
1004 static bool
1005 aarch64_valid_suffix_char_p (aarch64_reg_type type, char ch)
1006 {
1007 switch (type)
1008 {
1009 case REG_TYPE_VN:
1010 case REG_TYPE_ZN:
1011 case REG_TYPE_ZA:
1012 case REG_TYPE_ZAT:
1013 case REG_TYPE_ZATH:
1014 case REG_TYPE_ZATV:
1015 return ch == '.';
1016
1017 case REG_TYPE_PN:
1018 return ch == '.' || ch == '/';
1019
1020 default:
1021 return false;
1022 }
1023 }
1024
1025 /* Parse an index expression at *STR, storing it in *IMM on success. */
1026
1027 static bool
1028 parse_index_expression (char **str, int64_t *imm)
1029 {
1030 expressionS exp;
1031
1032 aarch64_get_expression (&exp, str, GE_NO_PREFIX, REJECT_ABSENT);
1033 if (exp.X_op != O_constant)
1034 {
1035 first_error (_("constant expression required"));
1036 return false;
1037 }
1038 *imm = exp.X_add_number;
1039 return true;
1040 }
1041
1042 /* Parse a register of the type TYPE.
1043
1044 Return null if the string pointed to by *CCP is not a valid register
1045 name or the parsed register is not of TYPE.
1046
1047 Otherwise return the register, and optionally return the register
1048 shape and element index information in *TYPEINFO.
1049
1050 FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list.
1051
1052 FLAGS includes PTR_FULL_REG if the function should ignore any potential
1053 register index. */
1054
1055 #define PTR_IN_REGLIST (1U << 0)
1056 #define PTR_FULL_REG (1U << 1)
1057
static const reg_entry *
parse_typed_reg (char **ccp, aarch64_reg_type type,
		 struct vector_type_el *typeinfo, unsigned int flags)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start from an "untyped, unindexed" description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.element_size = 0;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return NULL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return NULL;
    }
  /* Narrow TYPE from the (possibly multi-class) requested type to the
     specific type of the register actually parsed.  */
  type = reg->type;

  if (aarch64_valid_suffix_char_p (reg->type, *str))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return NULL;
	  /* For an N-bit element size the valid ZA tile numbers are
	     0 .. N/8-1.  */
	  if ((reg->type == REG_TYPE_ZAT
	       || reg->type == REG_TYPE_ZATH
	       || reg->type == REG_TYPE_ZATV)
	      && reg->number * 8 >= parsetype.element_size)
	    {
	      set_syntax_error (_("ZA tile number out of range"));
	      return NULL;
	    }
	}
      else
	{
	  /* A '/' suffix: SVE zero/merge predication.  */
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return NULL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type != REG_TYPE_VN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
    {
      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return NULL;
	}

      if (flags & PTR_IN_REGLIST)
	{
	  first_error (_("index not allowed inside register list"));
	  return NULL;
	}

      atype.defined |= NTA_HASINDEX;

      if (!parse_index_expression (&str, &atype.index))
	return NULL;

      if (! skip_past_char (&str, ']'))
	return NULL;
    }
  else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return NULL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  *ccp = str;

  return reg;
}
1177
1178 /* Parse register.
1179
1180 Return the register on success; return null otherwise.
1181
1182 If this is a NEON vector register with additional type information, fill
1183 in the struct pointed to by VECTYPE (if non-NULL).
1184
1185 This parser does not handle register lists. */
1186
1187 static const reg_entry *
1188 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1189 struct vector_type_el *vectype)
1190 {
1191 return parse_typed_reg (ccp, type, vectype, 0);
1192 }
1193
1194 static inline bool
1195 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1196 {
1197 return (e1.type == e2.type
1198 && e1.defined == e2.defined
1199 && e1.width == e2.width
1200 && e1.element_size == e2.element_size
1201 && e1.index == e2.index);
1202 }
1203
1204 /* This function parses a list of vector registers of type TYPE.
1205 On success, it returns the parsed register list information in the
1206 following encoded format:
1207
     bit 17-21   | 12-16     | 7-11      | 2-6       | 0-1
     4th regno   | 3rd regno | 2nd regno | 1st regno | num_of_reg
1210
1211 The information of the register shape and/or index is returned in
1212 *VECTYPE.
1213
1214 It returns PARSE_FAIL if the register list is invalid.
1215
1216 The list contains one to four registers.
1217 Each register can be one of:
1218 <Vt>.<T>[<index>]
1219 <Vt>.<T>
1220 All <T> should be identical.
1221 All <index> should be identical.
1222 There are restrictions on <Vt> numbers which are checked later
1223 (by reg_list_valid_p). */
1224
static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.element_size = 0;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;		/* Number of the most recently parsed register.  */
  val_range = -1;	/* First register number of the current range.  */
  in_range = 0;		/* Nonzero while handling a "Vm - Vn" range.  */
  do
    {
      if (in_range)
	{
	  str++; /* skip over '-' */
	  val_range = val;
	}
      const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
					      PTR_IN_REGLIST);
      if (!reg)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      val = reg->number;
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      /* An index on any element means the whole list must be followed
	 by an index after the closing '}'.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* All elements must share the shape of the first one.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      if (! error)
	/* Fold each register of the (possibly one-element) range into
	   the result, 5 bits per register.  */
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue after a comma; failing that, tentatively flag a range and
     continue only if the next character really is '-'.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  if (!parse_index_expression (&str, &typeinfo_first.index))
	    error = true;
	  if (! skip_past_char (&str, ']'))
	    error = true;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  /* Consume the input even on error, so the caller can resync.  */
  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1358
1359 /* Directives: register aliases. */
1360
1361 static reg_entry *
1362 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1363 {
1364 reg_entry *new;
1365 const char *name;
1366
1367 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1368 {
1369 if (new->builtin)
1370 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1371 str);
1372
1373 /* Only warn about a redefinition if it's not defined as the
1374 same register. */
1375 else if (new->number != number || new->type != type)
1376 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1377
1378 return NULL;
1379 }
1380
1381 name = xstrdup (str);
1382 new = XNEW (reg_entry);
1383
1384 new->name = name;
1385 new->number = number;
1386 new->type = type;
1387 new->builtin = false;
1388
1389 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1390
1391 return new;
1392 }
1393
1394 /* Look for the .req directive. This is of the form:
1395
1396 new_register_name .req existing_register_name
1397
1398 If we find one, or if it looks sufficiently like one that we want to
1399 handle any error here, return TRUE. Otherwise return FALSE. */
1400
static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      /* Return true even on failure: the directive was recognised, so
	 the caller should not process the line any further.  */
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Skip the upper-case variant when it matches the name as
	 written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1473
1474 /* Should never be called, as .req goes between the alias and the
1475 register name, not at the beginning of the line. */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reached only if ".req" somehow begins a statement; the normal
     "alias .req reg" form is handled by create_register_alias.  */
  as_bad (_("invalid syntax for .req directive"));
}
1481
1482 /* The .unreq directive deletes an alias which was previously defined
1483 by .req. For example:
1484
1485 my_alias .req r11
1486 .unreq my_alias */
1487
1488 static void
1489 s_unreq (int a ATTRIBUTE_UNUSED)
1490 {
1491 char *name;
1492 char saved_char;
1493
1494 name = input_line_pointer;
1495 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1496 saved_char = *input_line_pointer;
1497 *input_line_pointer = 0;
1498
1499 if (!*name)
1500 as_bad (_("invalid syntax for .unreq directive"));
1501 else
1502 {
1503 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1504
1505 if (!reg)
1506 as_bad (_("unknown register alias '%s'"), name);
1507 else if (reg->builtin)
1508 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1509 name);
1510 else
1511 {
1512 char *p;
1513 char *nbuf;
1514
1515 str_hash_delete (aarch64_reg_hsh, name);
1516 free ((char *) reg->name);
1517 free (reg);
1518
1519 /* Also locate the all upper case and all lower case versions.
1520 Do not complain if we cannot find one or the other as it
1521 was probably deleted above. */
1522
1523 nbuf = strdup (name);
1524 for (p = nbuf; *p; p++)
1525 *p = TOUPPER (*p);
1526 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1527 if (reg)
1528 {
1529 str_hash_delete (aarch64_reg_hsh, nbuf);
1530 free ((char *) reg->name);
1531 free (reg);
1532 }
1533
1534 for (p = nbuf; *p; p++)
1535 *p = TOLOWER (*p);
1536 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1537 if (reg)
1538 {
1539 str_hash_delete (aarch64_reg_hsh, nbuf);
1540 free ((char *) reg->name);
1541 free (reg);
1542 }
1543
1544 free (nbuf);
1545 }
1546 }
1547
1548 *input_line_pointer = saved_char;
1549 demand_empty_rest_of_line ();
1550 }
1551
1552 /* Directives: Instruction set selection. */
1553
1554 #if defined OBJ_ELF || defined OBJ_COFF
1555 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1556 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1557 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1558 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1559
1560 /* Create a new mapping symbol for the transition to STATE. */
1561
static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Pick the AAELF64 mapping-symbol name for STATE; both $d and $x are
     untyped local symbols (see the comment at the top of this
     section).  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag must be emitted in address
	 order; a duplicate at the same address replaces the old one.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1615
1616 /* We must sometimes convert a region marked as code to data during
1617 code alignment, if an odd number of bytes have to be padded. The
1618 code mapping symbol is pushed to an aligned address. */
1619
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol at this exact address,
     remove it; the $d emitted below takes its place.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the BYTES bytes of padding as data, then resume STATE.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1643
1644 static void mapping_state_2 (enum mstate state, int max_chars);
1645
1646 /* Set the mapping state to STATE. Only call this when about to
1647 emit some STATE bytes to the file. */
1648
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* Anything already emitted before this first instruction was
	 data, so mark the start of the section accordingly.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1687
1688 /* Same as mapping_state, but MAX_CHARS bytes have already been
1689 allocated. Put the mapping symbol that far back. */
1690
1691 static void
1692 mapping_state_2 (enum mstate state, int max_chars)
1693 {
1694 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1695
1696 if (!SEG_NORMAL (now_seg))
1697 return;
1698
1699 if (mapstate == state)
1700 /* The mapping symbol has already been emitted.
1701 There is nothing else to do. */
1702 return;
1703
1704 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1705 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1706 }
1707 #else
1708 #define mapping_state(x) /* nothing */
1709 #define mapping_state_2(x, y) /* nothing */
1710 #endif
1711
1712 /* Directives: sectioning and alignment. */
1713
1714 static void
1715 s_bss (int ignore ATTRIBUTE_UNUSED)
1716 {
1717 /* We don't support putting frags in the BSS segment, we fake it by
1718 marking in_bss, then looking at s_skip for clues. */
1719 subseg_set (bss_section, 0);
1720 demand_empty_rest_of_line ();
1721 mapping_state (MAP_DATA);
1722 }
1723
1724 static void
1725 s_even (int ignore ATTRIBUTE_UNUSED)
1726 {
1727 /* Never make frag if expect extra pass. */
1728 if (!need_pass_2)
1729 frag_align (1, 0, 0);
1730
1731 record_alignment (now_seg, 1);
1732
1733 demand_empty_rest_of_line ();
1734 }
1735
1736 /* Directives: Literal pools. */
1737
1738 static literal_pool *
1739 find_literal_pool (int size)
1740 {
1741 literal_pool *pool;
1742
1743 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1744 {
1745 if (pool->section == now_seg
1746 && pool->sub_section == now_subseg && pool->size == size)
1747 break;
1748 }
1749
1750 return pool;
1751 }
1752
static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = XNEW (literal_pool);
      /* NOTE(review): XNEW is based on xmalloc, which aborts rather than
	 returning NULL on allocation failure, so this check looks like
	 dead code -- confirm before relying on a NULL return here.  */
      if (!pool)
	return NULL;

      /* Currently we always put the literal pool in the current text
	 section.  If we were generating "small" model code where we
	 knew that all code and initialised data was within 1MB then
	 we could output literals to mergeable, read-only data
	 sections.  */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  The symbol
     is only given a location later, when s_ltorg dumps the pool.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    &zero_address_frag, 0);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}
1797
1798 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1799 Return TRUE on success, otherwise return FALSE. */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match when value and signedness agree.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match only when every component agrees.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP in place to reference the pool entry: the pool symbol
     plus the entry's byte offset.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1857
1858 /* Can't use symbol_new here, so have to create a symbol and then at
1859 a later date assign it a value. That's what these functions do. */
1860
static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME onto the notes obstack so the symbol owns stable
     storage.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1908
1909
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* There is one pool per access size: 4, 8 and 16 bytes (alignments
     2, 3 and 4).  Dump each non-empty pool in turn.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 control character in the pool label presumably keeps
	 it from clashing with any user-writable name -- TODO confirm.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Attach the pool's deferred symbol to the current location.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1968
1969 #if defined(OBJ_ELF) || defined(OBJ_COFF)
1970 /* Forward declarations for functions below, in the MD interface
1971 section. */
1972 static struct reloc_table_entry * find_reloc_table_entry (char **);
1973
1974 /* Directives: Data. */
1975 /* N.B. the support for relocation suffix in this directive needs to be
1976 implemented properly. */
1977
static void
s_aarch64_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* Emitting data, not instructions: switch the mapping state.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* After a symbol, look for an optional "#:reloc_op:"
	     relocation suffix.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      /* Relocation suffixes on data directives are recognised
		 but not yet supported (see the comment above this
		 function); diagnose and give up on the line.  */
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2029 #endif
2030
2031 #ifdef OBJ_ELF
2032 /* Forward declarations for functions below, in the MD interface
2033 section. */
2034 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
2035
2036 /* Mark symbol that it follows a variant PCS convention. */
2037
static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  /* Parse the symbol name operand, creating the symbol if needed.  */
  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();

  /* Record the variant-PCS marker in the ELF symbol's st_other field.  */
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
2058 #endif /* OBJ_ELF */
2059
2060 /* Output a 32-bit word, but mark as an instruction. */
2061
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;		/* Number of instruction words emitted.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  /* Emit each comma-separated constant as one 4-byte instruction word.  */
  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Pre-swap on big-endian targets so the emitted word keeps the
	 instruction image's byte order.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Record the emitted instruction bytes for DWARF line info.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2116
2117 static void
2118 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2119 {
2120 demand_empty_rest_of_line ();
2121 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2122 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2123 }
2124
2125 #ifdef OBJ_ELF
2126 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2127
2128 static void
2129 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2130 {
2131 expressionS exp;
2132
2133 expression (&exp);
2134 frag_grow (4);
2135 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2136 BFD_RELOC_AARCH64_TLSDESC_ADD);
2137
2138 demand_empty_rest_of_line ();
2139 }
2140
2141 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2142
2143 static void
2144 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2145 {
2146 expressionS exp;
2147
2148 /* Since we're just labelling the code, there's no need to define a
2149 mapping symbol. */
2150 expression (&exp);
2151 /* Make sure there is enough room in this frag for the following
2152 blr. This trick only works if the blr follows immediately after
2153 the .tlsdesc directive. */
2154 frag_grow (4);
2155 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2156 BFD_RELOC_AARCH64_TLSDESC_CALL);
2157
2158 demand_empty_rest_of_line ();
2159 }
2160
2161 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2162
2163 static void
2164 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2165 {
2166 expressionS exp;
2167
2168 expression (&exp);
2169 frag_grow (4);
2170 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2171 BFD_RELOC_AARCH64_TLSDESC_LDR);
2172
2173 demand_empty_rest_of_line ();
2174 }
2175 #endif /* OBJ_ELF */
2176
2177 #ifdef TE_PE
static void
s_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Emit each comma-separated expression as a 32-bit value; symbols
     are converted to section-relative (O_secrel) references.  */
  do
    {
      expression (&exp);
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put the terminator back into the stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2196
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  /* Build a section-relative reference to SYMBOL and emit it as SIZE
     bytes.  Only these three fields are set; presumably emit_expr
     ignores the remaining expressionS fields for O_secrel -- confirm
     before extending.  */
  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
2207
static void
s_secidx (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Emit each comma-separated expression as a 16-bit value; symbols
     are converted to section-index (O_secidx) references.  */
  do
    {
      expression (&exp);
      if (exp.X_op == O_symbol)
	exp.X_op = O_secidx;

      emit_expr (&exp, 2);
    }
  while (*input_line_pointer++ == ',');

  /* Put the terminator back into the stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2226 #endif /* TE_PE */
2227
2228 static void s_aarch64_arch (int);
2229 static void s_aarch64_cpu (int);
2230 static void s_aarch64_arch_extension (int);
2231
2232 /* This table describes all the machine specific pseudo-ops the assembler
2233 has to support. The fields are:
2234 pseudo-op name without dot
2235 function to call to execute this pseudo-op
2236 Integer arg to pass to the function. */
2237
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  /* Sectioning and alignment.  */
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* Literal pools.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Target CPU/architecture selection.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  /* Raw instruction words and CFI.  */
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  /* TLS descriptor relocation markers and variant-PCS marking.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* Data directives; the integer argument is the element size.  */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  /* PE section-relative data.  */
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  /* Half-precision float constants; the argument selects the format.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2271 \f
2272
2273 /* Check whether STR points to a register name followed by a comma or the
2274 end of line; REG_TYPE indicates which register types are checked
2275 against. Return TRUE if STR is such a register name; otherwise return
2276 FALSE. The function does not intend to produce any diagnostics, but since
2277 the register parser aarch64_reg_parse, which is called by this function,
2278 does produce diagnostics, we call clear_error to clear any diagnostics
2279 that may be generated by aarch64_reg_parse.
2280 Also, the function returns FALSE directly if there is any user error
2281 present at the function entry. This prevents the existing diagnostics
2282 state from being spoiled.
2283 The function currently serves parse_constant_immediate and
2284 parse_big_immediate only. */
2285 static bool
2286 reg_name_p (char *str, aarch64_reg_type reg_type)
2287 {
2288 const reg_entry *reg;
2289
2290 /* Prevent the diagnostics state from being spoiled. */
2291 if (error_p ())
2292 return false;
2293
2294 reg = aarch64_reg_parse (&str, reg_type, NULL);
2295
2296 /* Clear the parsing error that may be set by the reg parser. */
2297 clear_error ();
2298
2299 if (!reg)
2300 return false;
2301
2302 skip_whitespace (str);
2303 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2304 return true;
2305
2306 return false;
2307 }
2308
2309 /* Parser functions used exclusively in instruction operands. */
2310
2311 /* Parse an immediate expression which may not be constant.
2312
2313 To prevent the expression parser from pushing a register name
2314 into the symbol table as an undefined symbol, firstly a check is
2315 done to find out whether STR is a register of type REG_TYPE followed
2316 by a comma or the end of line. Return FALSE if STR is such a string. */
2317
2318 static bool
2319 parse_immediate_expression (char **str, expressionS *exp,
2320 aarch64_reg_type reg_type)
2321 {
2322 if (reg_name_p (*str, reg_type))
2323 {
2324 set_recoverable_error (_("immediate operand required"));
2325 return false;
2326 }
2327
2328 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2329
2330 if (exp->X_op == O_absent)
2331 {
2332 set_fatal_syntax_error (_("missing immediate expression"));
2333 return false;
2334 }
2335
2336 return true;
2337 }
2338
2339 /* Constant immediate-value read function for use in insn parsing.
2340 STR points to the beginning of the immediate (with the optional
2341 leading #); *VAL receives the value. REG_TYPE says which register
2342 names should be treated as registers rather than as symbolic immediates.
2343
2344 Return TRUE on success; otherwise return FALSE. */
2345
2346 static bool
2347 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2348 {
2349 expressionS exp;
2350
2351 if (! parse_immediate_expression (str, &exp, reg_type))
2352 return false;
2353
2354 if (exp.X_op != O_constant)
2355 {
2356 set_syntax_error (_("constant expression required"));
2357 return false;
2358 }
2359
2360 *val = exp.X_add_number;
2361 return true;
2362 }
2363
/* Compress the 32-bit IEEE single-precision bit pattern IMM into the
   8-bit AArch64 floating-point immediate encoding: bit 31 (the sign)
   becomes bit 7, and bits 25:19 become bits 6:0.  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31] -> b[7].  */
  return sign | low7;
}
2370
2371 /* Return TRUE if the single-precision floating-point value encoded in IMM
2372 can be expressed in the AArch64 8-bit signed floating-point format with
2373 3-bit exponent and normalized 4 bits of precision; in other words, the
2374 floating-point value must be expressable as
2375 (+/-) n / 16 * power (2, r)
2376 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2377
static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* An encodable single-precision value has the bit pattern

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are 0 or 1 independently and E == ~e,
     i.e. bits 18:0 are all zero and bits 29:25 all equal the
     complement of bit 30.  */

  /* The low 19 mantissa bits must be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 29:25 must be the complement of bit 30.  */
  uint32_t expected = ((imm >> 30) & 0x1) == 0 ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected;
}
2403
2404 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2405 as an IEEE float without any loss of precision. Store the value in
2406 *FPWORD if so. */
2407
static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* A double-precision bit pattern is losslessly narrowable when it
     has the form

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
     if Eeee_eeee != 1111_1111

     where n, e, s and S are 0 or 1 independently and ~ is the inverse
     of E.  */

  uint32_t high32 = imm >> 32;
  uint32_t low32 = (uint32_t) imm;

  /* The 29 low-order mantissa bits have no single-precision home and
     must therefore all be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* The three exponent bits below the top one (E~~~) must all be the
     complement of bit 62.  */
  uint32_t expected = ((high32 >> 30) & 0x1) == 0 ? 0x38000000 : 0x40000000;
  if ((high32 & 0x78000000) != expected)
    return false;

  /* Reject exponents whose narrowed form would be all-ones
     (Eeee_eeee == 1111_1111, i.e. Inf/NaN territory).  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));			/* 3 S bits.  */
  return true;
}
2451
2452 /* Return true if we should treat OPERAND as a double-precision
2453 floating-point operand rather than a single-precision one. */
2454 static bool
2455 double_precision_operand_p (const aarch64_opnd_info *operand)
2456 {
2457 /* Check for unsuffixed SVE registers, which are allowed
2458 for LDR and STR but not in instructions that require an
2459 immediate. We get better error messages if we arbitrarily
2460 pick one size, parse the immediate normally, and then
2461 report the match failure in the normal way. */
2462 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2463 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2464 }
2465
2466 /* Parse a floating-point immediate. Return TRUE on success and return the
2467 value in *IMMED in the format of IEEE754 single-precision encoding.
2468 *CCP points to the start of the string; DP_P is TRUE when the immediate
2469 is expected to be in double-precision (N.B. this only matters when
2470 hexadecimal representation is involved). REG_TYPE says which register
2471 names should be treated as registers rather than as symbolic immediates.
2472
2473 This routine accepts any IEEE float; it is up to the callers to reject
2474 invalid ones. */
2475
static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* The leading '#' on the immediate is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit pattern to a 32-bit one; fail if that
	     would lose precision.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision pattern must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A bare register name cannot be a floating-point immediate.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal form: let atof_ieee produce the single-precision
	 littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2541
2542 /* Less-generic immediate-value read function with the possibility of loading
2543 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2544 instructions.
2545
2546 To prevent the expression parser from pushing a register name into the
2547 symbol table as an undefined symbol, a check is firstly done to find
2548 out whether STR is a register of type REG_TYPE followed by a comma or
2549 the end of line. Return FALSE if STR is such a register. */
2550
2551 static bool
2552 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2553 {
2554 char *ptr = *str;
2555
2556 if (reg_name_p (ptr, reg_type))
2557 {
2558 set_syntax_error (_("immediate operand required"));
2559 return false;
2560 }
2561
2562 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);
2563
2564 if (inst.reloc.exp.X_op == O_constant)
2565 *imm = inst.reloc.exp.X_add_number;
2566
2567 *str = ptr;
2568
2569 return true;
2570 }
2571
2572 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2573 if NEED_LIBOPCODES is non-zero, the fixup will need
2574 assistance from the libopcodes. */
2575
2576 static inline void
2577 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2578 const aarch64_opnd_info *operand,
2579 int need_libopcodes_p)
2580 {
2581 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2582 reloc->opnd = operand->type;
2583 if (need_libopcodes_p)
2584 reloc->need_libopcodes_p = 1;
2585 };
2586
2587 /* Return TRUE if the instruction needs to be fixed up later internally by
2588 the GAS; otherwise return FALSE. */
2589
2590 static inline bool
2591 aarch64_gas_internal_fixup_p (void)
2592 {
2593 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2594 }
2595
2596 /* Assign the immediate value to the relevant field in *OPERAND if
2597 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2598 needs an internal fixup in a later stage.
2599 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2600 IMM.VALUE that may get assigned with the constant. */
2601 static inline void
2602 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2603 aarch64_opnd_info *operand,
2604 int addr_off_p,
2605 int need_libopcodes_p,
2606 int skip_p)
2607 {
2608 if (reloc->exp.X_op == O_constant)
2609 {
2610 if (addr_off_p)
2611 operand->addr.offset.imm = reloc->exp.X_add_number;
2612 else
2613 operand->imm.value = reloc->exp.X_add_number;
2614 reloc->type = BFD_RELOC_UNUSED;
2615 }
2616 else
2617 {
2618 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2619 /* Tell libopcodes to ignore this operand or not. This is helpful
2620 when one of the operands needs to be fixed up later but we need
2621 libopcodes to check the other operands. */
2622 operand->skip = skip_p;
2623 }
2624 }
2625
2626 /* Relocation modifiers. Each entry in the table contains the textual
2627 name for the relocation which may be placed before a symbol used as
2628 a load/store offset, or add immediate. It must be surrounded by a
2629 leading and trailing colon, for example:
2630
2631 ldr x0, [x1, #:rello:varsym]
2632 add x0, x1, #:rello:varsym */
2633
struct reloc_table_entry
{
  /* Modifier name as written in source, without the surrounding
     colons.  */
  const char *name;
  /* Non-zero if the relocation is PC-relative.  */
  int pc_rel;
  /* Relocation to emit for each instruction context in which the
     modifier may appear; 0 means the modifier is not accepted in that
     context.  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};
2645
2646 static struct reloc_table_entry reloc_table[] =
2647 {
2648 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2649 {"lo12", 0,
2650 0, /* adr_type */
2651 0,
2652 0,
2653 BFD_RELOC_AARCH64_ADD_LO12,
2654 BFD_RELOC_AARCH64_LDST_LO12,
2655 0},
2656
2657 /* Higher 21 bits of pc-relative page offset: ADRP */
2658 {"pg_hi21", 1,
2659 0, /* adr_type */
2660 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2661 0,
2662 0,
2663 0,
2664 0},
2665
2666 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2667 {"pg_hi21_nc", 1,
2668 0, /* adr_type */
2669 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2670 0,
2671 0,
2672 0,
2673 0},
2674
2675 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2676 {"abs_g0", 0,
2677 0, /* adr_type */
2678 0,
2679 BFD_RELOC_AARCH64_MOVW_G0,
2680 0,
2681 0,
2682 0},
2683
2684 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2685 {"abs_g0_s", 0,
2686 0, /* adr_type */
2687 0,
2688 BFD_RELOC_AARCH64_MOVW_G0_S,
2689 0,
2690 0,
2691 0},
2692
2693 /* Less significant bits 0-15 of address/value: MOVK, no check */
2694 {"abs_g0_nc", 0,
2695 0, /* adr_type */
2696 0,
2697 BFD_RELOC_AARCH64_MOVW_G0_NC,
2698 0,
2699 0,
2700 0},
2701
2702 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2703 {"abs_g1", 0,
2704 0, /* adr_type */
2705 0,
2706 BFD_RELOC_AARCH64_MOVW_G1,
2707 0,
2708 0,
2709 0},
2710
2711 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2712 {"abs_g1_s", 0,
2713 0, /* adr_type */
2714 0,
2715 BFD_RELOC_AARCH64_MOVW_G1_S,
2716 0,
2717 0,
2718 0},
2719
2720 /* Less significant bits 16-31 of address/value: MOVK, no check */
2721 {"abs_g1_nc", 0,
2722 0, /* adr_type */
2723 0,
2724 BFD_RELOC_AARCH64_MOVW_G1_NC,
2725 0,
2726 0,
2727 0},
2728
2729 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2730 {"abs_g2", 0,
2731 0, /* adr_type */
2732 0,
2733 BFD_RELOC_AARCH64_MOVW_G2,
2734 0,
2735 0,
2736 0},
2737
2738 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2739 {"abs_g2_s", 0,
2740 0, /* adr_type */
2741 0,
2742 BFD_RELOC_AARCH64_MOVW_G2_S,
2743 0,
2744 0,
2745 0},
2746
2747 /* Less significant bits 32-47 of address/value: MOVK, no check */
2748 {"abs_g2_nc", 0,
2749 0, /* adr_type */
2750 0,
2751 BFD_RELOC_AARCH64_MOVW_G2_NC,
2752 0,
2753 0,
2754 0},
2755
2756 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2757 {"abs_g3", 0,
2758 0, /* adr_type */
2759 0,
2760 BFD_RELOC_AARCH64_MOVW_G3,
2761 0,
2762 0,
2763 0},
2764
2765 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2766 {"prel_g0", 1,
2767 0, /* adr_type */
2768 0,
2769 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2770 0,
2771 0,
2772 0},
2773
2774 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2775 {"prel_g0_nc", 1,
2776 0, /* adr_type */
2777 0,
2778 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2779 0,
2780 0,
2781 0},
2782
2783 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2784 {"prel_g1", 1,
2785 0, /* adr_type */
2786 0,
2787 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2788 0,
2789 0,
2790 0},
2791
2792 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2793 {"prel_g1_nc", 1,
2794 0, /* adr_type */
2795 0,
2796 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2797 0,
2798 0,
2799 0},
2800
2801 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2802 {"prel_g2", 1,
2803 0, /* adr_type */
2804 0,
2805 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2806 0,
2807 0,
2808 0},
2809
2810 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2811 {"prel_g2_nc", 1,
2812 0, /* adr_type */
2813 0,
2814 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2815 0,
2816 0,
2817 0},
2818
2819 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2820 {"prel_g3", 1,
2821 0, /* adr_type */
2822 0,
2823 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2824 0,
2825 0,
2826 0},
2827
2828 /* Get to the page containing GOT entry for a symbol. */
2829 {"got", 1,
2830 0, /* adr_type */
2831 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2832 0,
2833 0,
2834 0,
2835 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2836
2837 /* 12 bit offset into the page containing GOT entry for that symbol. */
2838 {"got_lo12", 0,
2839 0, /* adr_type */
2840 0,
2841 0,
2842 0,
2843 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2844 0},
2845
2846 /* 0-15 bits of address/value: MOVk, no check. */
2847 {"gotoff_g0_nc", 0,
2848 0, /* adr_type */
2849 0,
2850 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2851 0,
2852 0,
2853 0},
2854
2855 /* Most significant bits 16-31 of address/value: MOVZ. */
2856 {"gotoff_g1", 0,
2857 0, /* adr_type */
2858 0,
2859 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2860 0,
2861 0,
2862 0},
2863
2864 /* 15 bit offset into the page containing GOT entry for that symbol. */
2865 {"gotoff_lo15", 0,
2866 0, /* adr_type */
2867 0,
2868 0,
2869 0,
2870 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2871 0},
2872
2873 /* Get to the page containing GOT TLS entry for a symbol */
2874 {"gottprel_g0_nc", 0,
2875 0, /* adr_type */
2876 0,
2877 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2878 0,
2879 0,
2880 0},
2881
2882 /* Get to the page containing GOT TLS entry for a symbol */
2883 {"gottprel_g1", 0,
2884 0, /* adr_type */
2885 0,
2886 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2887 0,
2888 0,
2889 0},
2890
2891 /* Get to the page containing GOT TLS entry for a symbol */
2892 {"tlsgd", 0,
2893 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2894 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2895 0,
2896 0,
2897 0,
2898 0},
2899
2900 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2901 {"tlsgd_lo12", 0,
2902 0, /* adr_type */
2903 0,
2904 0,
2905 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2906 0,
2907 0},
2908
2909 /* Lower 16 bits address/value: MOVk. */
2910 {"tlsgd_g0_nc", 0,
2911 0, /* adr_type */
2912 0,
2913 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2914 0,
2915 0,
2916 0},
2917
2918 /* Most significant bits 16-31 of address/value: MOVZ. */
2919 {"tlsgd_g1", 0,
2920 0, /* adr_type */
2921 0,
2922 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2923 0,
2924 0,
2925 0},
2926
2927 /* Get to the page containing GOT TLS entry for a symbol */
2928 {"tlsdesc", 0,
2929 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2930 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2931 0,
2932 0,
2933 0,
2934 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2935
2936 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2937 {"tlsdesc_lo12", 0,
2938 0, /* adr_type */
2939 0,
2940 0,
2941 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2942 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2943 0},
2944
2945 /* Get to the page containing GOT TLS entry for a symbol.
2946 The same as GD, we allocate two consecutive GOT slots
2947 for module index and module offset, the only difference
2948 with GD is the module offset should be initialized to
2949 zero without any outstanding runtime relocation. */
2950 {"tlsldm", 0,
2951 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2952 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2953 0,
2954 0,
2955 0,
2956 0},
2957
2958 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2959 {"tlsldm_lo12_nc", 0,
2960 0, /* adr_type */
2961 0,
2962 0,
2963 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2964 0,
2965 0},
2966
2967 /* 12 bit offset into the module TLS base address. */
2968 {"dtprel_lo12", 0,
2969 0, /* adr_type */
2970 0,
2971 0,
2972 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2973 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2974 0},
2975
2976 /* Same as dtprel_lo12, no overflow check. */
2977 {"dtprel_lo12_nc", 0,
2978 0, /* adr_type */
2979 0,
2980 0,
2981 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2982 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2983 0},
2984
2985 /* bits[23:12] of offset to the module TLS base address. */
2986 {"dtprel_hi12", 0,
2987 0, /* adr_type */
2988 0,
2989 0,
2990 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2991 0,
2992 0},
2993
2994 /* bits[15:0] of offset to the module TLS base address. */
2995 {"dtprel_g0", 0,
2996 0, /* adr_type */
2997 0,
2998 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2999 0,
3000 0,
3001 0},
3002
3003 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
3004 {"dtprel_g0_nc", 0,
3005 0, /* adr_type */
3006 0,
3007 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
3008 0,
3009 0,
3010 0},
3011
3012 /* bits[31:16] of offset to the module TLS base address. */
3013 {"dtprel_g1", 0,
3014 0, /* adr_type */
3015 0,
3016 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
3017 0,
3018 0,
3019 0},
3020
3021 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
3022 {"dtprel_g1_nc", 0,
3023 0, /* adr_type */
3024 0,
3025 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
3026 0,
3027 0,
3028 0},
3029
3030 /* bits[47:32] of offset to the module TLS base address. */
3031 {"dtprel_g2", 0,
3032 0, /* adr_type */
3033 0,
3034 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
3035 0,
3036 0,
3037 0},
3038
3039 /* Lower 16 bit offset into GOT entry for a symbol */
3040 {"tlsdesc_off_g0_nc", 0,
3041 0, /* adr_type */
3042 0,
3043 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
3044 0,
3045 0,
3046 0},
3047
3048 /* Higher 16 bit offset into GOT entry for a symbol */
3049 {"tlsdesc_off_g1", 0,
3050 0, /* adr_type */
3051 0,
3052 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
3053 0,
3054 0,
3055 0},
3056
3057 /* Get to the page containing GOT TLS entry for a symbol */
3058 {"gottprel", 0,
3059 0, /* adr_type */
3060 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
3061 0,
3062 0,
3063 0,
3064 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
3065
3066 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3067 {"gottprel_lo12", 0,
3068 0, /* adr_type */
3069 0,
3070 0,
3071 0,
3072 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
3073 0},
3074
3075 /* Get tp offset for a symbol. */
3076 {"tprel", 0,
3077 0, /* adr_type */
3078 0,
3079 0,
3080 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3081 0,
3082 0},
3083
3084 /* Get tp offset for a symbol. */
3085 {"tprel_lo12", 0,
3086 0, /* adr_type */
3087 0,
3088 0,
3089 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3090 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
3091 0},
3092
3093 /* Get tp offset for a symbol. */
3094 {"tprel_hi12", 0,
3095 0, /* adr_type */
3096 0,
3097 0,
3098 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
3099 0,
3100 0},
3101
3102 /* Get tp offset for a symbol. */
3103 {"tprel_lo12_nc", 0,
3104 0, /* adr_type */
3105 0,
3106 0,
3107 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
3108 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
3109 0},
3110
3111 /* Most significant bits 32-47 of address/value: MOVZ. */
3112 {"tprel_g2", 0,
3113 0, /* adr_type */
3114 0,
3115 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
3116 0,
3117 0,
3118 0},
3119
3120 /* Most significant bits 16-31 of address/value: MOVZ. */
3121 {"tprel_g1", 0,
3122 0, /* adr_type */
3123 0,
3124 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3125 0,
3126 0,
3127 0},
3128
3129 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
3130 {"tprel_g1_nc", 0,
3131 0, /* adr_type */
3132 0,
3133 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3134 0,
3135 0,
3136 0},
3137
3138 /* Most significant bits 0-15 of address/value: MOVZ. */
3139 {"tprel_g0", 0,
3140 0, /* adr_type */
3141 0,
3142 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3143 0,
3144 0,
3145 0},
3146
3147 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3148 {"tprel_g0_nc", 0,
3149 0, /* adr_type */
3150 0,
3151 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3152 0,
3153 0,
3154 0},
3155
3156 /* 15bit offset from got entry to base address of GOT table. */
3157 {"gotpage_lo15", 0,
3158 0,
3159 0,
3160 0,
3161 0,
3162 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3163 0},
3164
3165 /* 14bit offset from got entry to base address of GOT table. */
3166 {"gotpage_lo14", 0,
3167 0,
3168 0,
3169 0,
3170 0,
3171 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3172 0},
3173 };
3174
3175 /* Given the address of a pointer pointing to the textual name of a
3176 relocation as may appear in assembler source, attempt to find its
3177 details in reloc_table. The pointer will be updated to the character
3178 after the trailing colon. On failure, NULL will be returned;
3179 otherwise return the reloc_table_entry. */
3180
3181 static struct reloc_table_entry *
3182 find_reloc_table_entry (char **str)
3183 {
3184 unsigned int i;
3185 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3186 {
3187 int length = strlen (reloc_table[i].name);
3188
3189 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3190 && (*str)[length] == ':')
3191 {
3192 *str += (length + 1);
3193 return &reloc_table[i];
3194 }
3195 }
3196
3197 return NULL;
3198 }
3199
3200 /* Returns 0 if the relocation should never be forced,
3201 1 if the relocation must be forced, and -1 if either
3202 result is OK. */
3203
static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    /* GOT-, TLS- and page-relative relocations whose final value
       depends on symbol placement at link time.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* Either forcing or not forcing is acceptable.  */
      return -1;
    }
}
3302
3303 int
3304 aarch64_force_relocation (struct fix *fixp)
3305 {
3306 int res = aarch64_force_reloc (fixp->fx_r_type);
3307
3308 if (res == -1)
3309 return generic_force_reloc (fixp);
3310 return res;
3311 }
3312
3313 /* Mode argument to parse_shift and parser_shifter_operand. */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl" (SVE vector-length multiple)  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n, as used in
				   register-offset addressing  */
};
3327
/* Parse a <shift> operator on an AArch64 data processing instruction.
   Return TRUE on success; otherwise return FALSE.

   *STR is advanced past the operator name and any shift amount.  MODE
   restricts which modifier kinds are acceptable; on success the kind
   and amount are recorded in OPERAND->shifter.  */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan the alphabetic operator name.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the name up in the table of known shift/extend operators.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only meaningful in the LSL/MSL immediate form.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  /* MUL is only meaningful in the SVE MUL and MUL VL forms.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Check the parsed operator against the constraints of MODE.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
        {
          set_syntax_error (_("extending shift is not permitted"));
          return false;
        }
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
        {
          set_syntax_error (_("'ROR' shift is not permitted"));
          return false;
        }
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
        {
          set_syntax_error (_("only 'LSL' shift is permitted"));
          return false;
        }
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
        {
          set_syntax_error (_("only 'MUL' is permitted"));
          return false;
        }
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
         token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
        {
          skip_whitespace (p);
          if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
            {
              p += 2;
              kind = AARCH64_MOD_MUL_VL;
              break;
            }
        }
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
          && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
        {
          set_fatal_syntax_error
            (_("invalid shift for the register offset addressing mode"));
          return false;
        }
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
        {
          set_syntax_error (_("invalid shift operator"));
          return false;
        }
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
        {
          p++;
          exp_has_prefix = 1;
        }
      aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* Only extending shifts may omit the amount; a bare immediate
         prefix ('#') with no expression following it is an error.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
        {
          set_syntax_error (_("missing shift amount"));
          return false;
        }
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
           && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3500
3501 /* Parse a <shifter_operand> for a data processing instruction:
3502
3503 #<immediate>
3504 #<immediate>, LSL #imm
3505
3506 Validation of immediate operands is deferred to md_apply_fix.
3507
3508 Return TRUE on success; otherwise return FALSE. */
3509
3510 static bool
3511 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3512 enum parse_shift_mode mode)
3513 {
3514 char *p;
3515
3516 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3517 return false;
3518
3519 p = *str;
3520
3521 /* Accept an immediate expression. */
3522 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
3523 REJECT_ABSENT))
3524 return false;
3525
3526 /* Accept optional LSL for arithmetic immediate values. */
3527 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3528 if (! parse_shift (&p, operand, SHIFTED_LSL))
3529 return false;
3530
3531 /* Not accept any shifter for logical immediate values. */
3532 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3533 && parse_shift (&p, operand, mode))
3534 {
3535 set_syntax_error (_("unexpected shift operator"));
3536 return false;
3537 }
3538
3539 *str = p;
3540 return true;
3541 }
3542
3543 /* Parse a <shifter_operand> for a data processing instruction:
3544
3545 <Rm>
3546 <Rm>, <shift>
3547 #<immediate>
3548 #<immediate>, LSL #imm
3549
3550 where <shift> is handled by parse_shift above, and the last two
3551 cases are handled by the function above.
3552
3553 Validation of immediate operands is deferred to md_apply_fix.
3554
3555 Return TRUE on success; otherwise return FALSE. */
3556
3557 static bool
3558 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3559 enum parse_shift_mode mode)
3560 {
3561 const reg_entry *reg;
3562 aarch64_opnd_qualifier_t qualifier;
3563 enum aarch64_operand_class opd_class
3564 = aarch64_get_operand_class (operand->type);
3565
3566 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3567 if (reg)
3568 {
3569 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3570 {
3571 set_syntax_error (_("unexpected register in the immediate operand"));
3572 return false;
3573 }
3574
3575 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3576 {
3577 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3578 return false;
3579 }
3580
3581 operand->reg.regno = reg->number;
3582 operand->qualifier = qualifier;
3583
3584 /* Accept optional shift operation on register. */
3585 if (! skip_past_comma (str))
3586 return true;
3587
3588 if (! parse_shift (str, operand, mode))
3589 return false;
3590
3591 return true;
3592 }
3593 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3594 {
3595 set_syntax_error
3596 (_("integer register expected in the extended/shifted operand "
3597 "register"));
3598 return false;
3599 }
3600
3601 /* We have a shifted immediate variable. */
3602 return parse_shifter_operand_imm (str, operand, mode);
3603 }
3604
/* Parse a <shifter_operand> that may be introduced by a relocation
   modifier such as "#:lo12:sym" or ":lo12:sym".

   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip over the '#:' or ':' that introduces the modifier name.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3666
3667 /* Parse all forms of an address expression. Information is written
3668 to *OPERAND and/or inst.reloc.
3669
3670 The A64 instruction set has the following addressing modes:
3671
3672 Offset
3673 [base] // in SIMD ld/st structure
3674 [base{,#0}] // in ld/st exclusive
3675 [base{,#imm}]
3676 [base,Xm{,LSL #imm}]
3677 [base,Xm,SXTX {#imm}]
3678 [base,Wm,(S|U)XTW {#imm}]
3679 Pre-indexed
3680 [base]! // in ldraa/ldrab exclusive
3681 [base,#imm]!
3682 Post-indexed
3683 [base],#imm
3684 [base],Xm // in SIMD ld/st structure
3685 PC-relative (literal)
3686 label
3687 SVE:
3688 [base,#imm,MUL VL]
3689 [base,Zm.D{,LSL #imm}]
3690 [base,Zm.S,(S|U)XTW {#imm}]
3691 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3692 [Zn.S,#imm]
3693 [Zn.D,#imm]
3694 [Zn.S{, Xm}]
3695 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3696 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3697 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3698
3699 (As a convenience, the notation "=immediate" is permitted in conjunction
3700 with the pc-relative literal load instructions to automatically place an
3701 immediate value or symbolic address in a nearby literal pool and generate
3702 a hidden label which references it.)
3703
3704 Upon a successful parsing, the address structure in *OPERAND will be
3705 filled in the following way:
3706
3707 .base_regno = <base>
3708 .offset.is_reg // 1 if the offset is a register
3709 .offset.imm = <imm>
3710 .offset.regno = <Rm>
3711
3712 For different addressing modes defined in the A64 ISA:
3713
3714 Offset
3715 .pcrel=0; .preind=1; .postind=0; .writeback=0
3716 Pre-indexed
3717 .pcrel=0; .preind=1; .postind=0; .writeback=1
3718 Post-indexed
3719 .pcrel=0; .preind=0; .postind=1; .writeback=1
3720 PC-relative (literal)
3721 .pcrel=1; .preind=1; .postind=0; .writeback=0
3722
3723 The shift/extension information, if any, will be stored in .shifter.
3724 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3725 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3726 corresponding register.
3727
3728 BASE_TYPE says which types of base register should be accepted and
3729 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3730 is the type of shifter that is allowed for immediate offsets,
3731 or SHIFTED_NONE if none.
3732
3733 In all other respects, it is the caller's responsibility to check
3734 for addressing modes not supported by the instruction, and to set
3735 inst.reloc.type. */
3736
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is the PC-relative form, i.e. =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* ADR uses the adr_type variant; everything else here is a
	     load-literal.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* '[' seen: a register-based address.  Parse the base register.  */

  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      /* The offset may be a register or an (optionally relocated)
	 immediate expression.  */
      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These forms require a 64-bit (or matching-size) offset.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW forms require a 32-bit offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      /* The post-index value is either a 64-bit register or an
	 immediate expression.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] becomes [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] becomes [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
4035
4036 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4037 on success. */
4038 static bool
4039 parse_address (char **str, aarch64_opnd_info *operand)
4040 {
4041 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4042 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4043 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
4044 }
4045
/* Parse an address in which SVE vector registers and MUL VL are allowed.
   The arguments have the same meaning as for parse_address_main.
   Return TRUE on success.  */
static bool
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  /* SVE base/offset register types and a MUL VL immediate shifter.  */
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
4058
4059 /* Parse a register X0-X30. The register must be 64-bit and register 31
4060 is unallocated. */
4061 static bool
4062 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4063 {
4064 const reg_entry *reg = parse_reg (str);
4065 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4066 {
4067 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
4068 return false;
4069 }
4070 operand->reg.regno = reg->number;
4071 operand->qualifier = AARCH64_OPND_QLF_X;
4072 return true;
4073 }
4074
4075 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
4076 Return TRUE on success; otherwise return FALSE. */
4077 static bool
4078 parse_half (char **str, int *internal_fixup_p)
4079 {
4080 char *p = *str;
4081
4082 skip_past_char (&p, '#');
4083
4084 gas_assert (internal_fixup_p);
4085 *internal_fixup_p = 0;
4086
4087 if (*p == ':')
4088 {
4089 struct reloc_table_entry *entry;
4090
4091 /* Try to parse a relocation. Anything else is an error. */
4092 ++p;
4093
4094 if (!(entry = find_reloc_table_entry (&p)))
4095 {
4096 set_syntax_error (_("unknown relocation modifier"));
4097 return false;
4098 }
4099
4100 if (entry->movw_type == 0)
4101 {
4102 set_syntax_error
4103 (_("this relocation modifier is not allowed on this instruction"));
4104 return false;
4105 }
4106
4107 inst.reloc.type = entry->movw_type;
4108 }
4109 else
4110 *internal_fixup_p = 1;
4111
4112 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4113 return false;
4114
4115 *str = p;
4116 return true;
4117 }
4118
4119 /* Parse an operand for an ADRP instruction:
4120 ADRP <Xd>, <label>
4121 Return TRUE on success; otherwise return FALSE. */
4122
4123 static bool
4124 parse_adrp (char **str)
4125 {
4126 char *p;
4127
4128 p = *str;
4129 if (*p == ':')
4130 {
4131 struct reloc_table_entry *entry;
4132
4133 /* Try to parse a relocation. Anything else is an error. */
4134 ++p;
4135 if (!(entry = find_reloc_table_entry (&p)))
4136 {
4137 set_syntax_error (_("unknown relocation modifier"));
4138 return false;
4139 }
4140
4141 if (entry->adrp_type == 0)
4142 {
4143 set_syntax_error
4144 (_("this relocation modifier is not allowed on this instruction"));
4145 return false;
4146 }
4147
4148 inst.reloc.type = entry->adrp_type;
4149 }
4150 else
4151 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4152
4153 inst.reloc.pc_rel = 1;
4154 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4155 return false;
4156 *str = p;
4157 return true;
4158 }
4159
4160 /* Miscellaneous. */
4161
4162 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4163 of SIZE tokens in which index I gives the token for field value I,
4164 or is null if field value I is invalid. REG_TYPE says which register
4165 names should be treated as registers rather than as symbolic immediates.
4166
4167 Return true on success, moving *STR past the operand and storing the
4168 field value in *VAL. */
4169
4170 static int
4171 parse_enum_string (char **str, int64_t *val, const char *const *array,
4172 size_t size, aarch64_reg_type reg_type)
4173 {
4174 expressionS exp;
4175 char *p, *q;
4176 size_t i;
4177
4178 /* Match C-like tokens. */
4179 p = q = *str;
4180 while (ISALNUM (*q))
4181 q++;
4182
4183 for (i = 0; i < size; ++i)
4184 if (array[i]
4185 && strncasecmp (array[i], p, q - p) == 0
4186 && array[i][q - p] == 0)
4187 {
4188 *val = i;
4189 *str = q;
4190 return true;
4191 }
4192
4193 if (!parse_immediate_expression (&p, &exp, reg_type))
4194 return false;
4195
4196 if (exp.X_op == O_constant
4197 && (uint64_t) exp.X_add_number < size)
4198 {
4199 *val = exp.X_add_number;
4200 *str = p;
4201 return true;
4202 }
4203
4204 /* Use the default error for this operand. */
4205 return false;
4206 }
4207
4208 /* Parse an option for a preload instruction. Returns the encoding for the
4209 option, or PARSE_FAIL. */
4210
4211 static int
4212 parse_pldop (char **str)
4213 {
4214 char *p, *q;
4215 const struct aarch64_name_value_pair *o;
4216
4217 p = q = *str;
4218 while (ISALNUM (*q))
4219 q++;
4220
4221 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4222 if (!o)
4223 return PARSE_FAIL;
4224
4225 *str = q;
4226 return o->value;
4227 }
4228
4229 /* Parse an option for a barrier instruction. Returns the encoding for the
4230 option, or PARSE_FAIL. */
4231
4232 static int
4233 parse_barrier (char **str)
4234 {
4235 char *p, *q;
4236 const struct aarch64_name_value_pair *o;
4237
4238 p = q = *str;
4239 while (ISALPHA (*q))
4240 q++;
4241
4242 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4243 if (!o)
4244 return PARSE_FAIL;
4245
4246 *str = q;
4247 return o->value;
4248 }
4249
4250 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4251 return 0 if successful. Otherwise return PARSE_FAIL. */
4252
4253 static int
4254 parse_barrier_psb (char **str,
4255 const struct aarch64_name_value_pair ** hint_opt)
4256 {
4257 char *p, *q;
4258 const struct aarch64_name_value_pair *o;
4259
4260 p = q = *str;
4261 while (ISALPHA (*q))
4262 q++;
4263
4264 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4265 if (!o)
4266 {
4267 set_fatal_syntax_error
4268 ( _("unknown or missing option to PSB/TSB"));
4269 return PARSE_FAIL;
4270 }
4271
4272 if (o->value != 0x11)
4273 {
4274 /* PSB only accepts option name 'CSYNC'. */
4275 set_syntax_error
4276 (_("the specified option is not accepted for PSB/TSB"));
4277 return PARSE_FAIL;
4278 }
4279
4280 *str = q;
4281 *hint_opt = o;
4282 return 0;
4283 }
4284
4285 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4286 return 0 if successful. Otherwise return PARSE_FAIL. */
4287
4288 static int
4289 parse_bti_operand (char **str,
4290 const struct aarch64_name_value_pair ** hint_opt)
4291 {
4292 char *p, *q;
4293 const struct aarch64_name_value_pair *o;
4294
4295 p = q = *str;
4296 while (ISALPHA (*q))
4297 q++;
4298
4299 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4300 if (!o)
4301 {
4302 set_fatal_syntax_error
4303 ( _("unknown option to BTI"));
4304 return PARSE_FAIL;
4305 }
4306
4307 switch (o->value)
4308 {
4309 /* Valid BTI operands. */
4310 case HINT_OPD_C:
4311 case HINT_OPD_J:
4312 case HINT_OPD_JC:
4313 break;
4314
4315 default:
4316 set_syntax_error
4317 (_("unknown option to BTI"));
4318 return PARSE_FAIL;
4319 }
4320
4321 *str = q;
4322 *hint_opt = o;
4323 return 0;
4324 }
4325
4326 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4327 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4328 on failure. Format:
4329
4330 REG_TYPE.QUALIFIER
4331
4332 Side effect: Update STR with current parse position of success.
4333 */
4334
4335 static const reg_entry *
4336 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4337 aarch64_opnd_qualifier_t *qualifier)
4338 {
4339 struct vector_type_el vectype;
4340 const reg_entry *reg = parse_typed_reg (str, reg_type, &vectype,
4341 PTR_FULL_REG);
4342 if (!reg)
4343 return NULL;
4344
4345 if (vectype.type == NT_invtype)
4346 *qualifier = AARCH64_OPND_QLF_NIL;
4347 else
4348 {
4349 *qualifier = vectype_to_qualifier (&vectype);
4350 if (*qualifier == AARCH64_OPND_QLF_NIL)
4351 return NULL;
4352 }
4353
4354 return reg;
4355 }
4356
4357 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4358
4359 #<imm>
4360 <imm>
4361
4362 Function return TRUE if immediate was found, or FALSE.
4363 */
4364 static bool
4365 parse_sme_immediate (char **str, int64_t *imm)
4366 {
4367 int64_t val;
4368 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4369 return false;
4370
4371 *imm = val;
4372 return true;
4373 }
4374
/* Parse index with vector select register and immediate:

      [<Wv>, <imm>]
      [<Wv>, #<imm>]
   where <Wv> is in W12-W15 range and # is optional for immediate.

   Return true on success, populating OPND with the parsed index.
   On failure a syntax error is recorded.  */

static bool
parse_sme_za_index (char **str, struct aarch64_indexed_za *opnd)
{
  const reg_entry *reg;

  if (!skip_past_char (str, '['))
    {
      set_syntax_error (_("expected '['"));
      return false;
    }

  /* Vector select register W12-W15 encoded in the 2-bit Rv field.  */
  reg = parse_reg (str);
  if (reg == NULL || reg->type != REG_TYPE_R_32
      || reg->number < 12 || reg->number > 15)
    {
      set_syntax_error (_("expected vector select register W12-W15"));
      return false;
    }
  opnd->index.regno = reg->number;

  if (!skip_past_char (str, ',')) /* The ',' and the offset are mandatory;
				     only the '#' prefix is optional.  */
    {
      set_syntax_error (_("expected ','"));
      return false;
    }

  if (!parse_sme_immediate (str, &opnd->index.imm))
    {
      set_syntax_error (_("index offset immediate expected"));
      return false;
    }

  if (!skip_past_char (str, ']'))
    {
      set_syntax_error (_("expected ']'"));
      return false;
    }

  return true;
}
4424
4425 /* Parse a register of type REG_TYPE that might have an element type
4426 qualifier and that is indexed by two values: a 32-bit register,
4427 followed by an immediate. The 32-bit register must be W12-W15.
4428 The range of the immediate varies by opcode and is checked in
4429 libopcodes.
4430
4431 Return true on success, populating OPND with information about
4432 the operand and setting QUALIFIER to the register qualifier.
4433
4434 Field format examples:
4435
     <Pm>.<T>[<Wv>, #<imm>]
4437 ZA[<Wv>, #<imm>]
4438 <ZAn><HV>.<T>[<Wv>, #<imm>]
4439 */
4440 static bool
4441 parse_dual_indexed_reg (char **str, aarch64_reg_type reg_type,
4442 struct aarch64_indexed_za *opnd,
4443 aarch64_opnd_qualifier_t *qualifier)
4444 {
4445 const reg_entry *reg = parse_reg_with_qual (str, reg_type, qualifier);
4446 if (!reg)
4447 return false;
4448
4449 opnd->v = aarch64_check_reg_type (reg, REG_TYPE_ZATV);
4450 opnd->regno = reg->number;
4451
4452 return parse_sme_za_index (str, opnd);
4453 }
4454
/* Like parse_sme_za_hv_tiles_operand, but expect braces around the
   operand, e.g. { ZA0.H[<Wv>, #<imm>] }.

   Return true on success; on failure record a syntax error and
   return false.  */

static bool
parse_sme_za_hv_tiles_operand_with_braces (char **str,
					   struct aarch64_indexed_za *opnd,
					   aarch64_opnd_qualifier_t *qualifier)
{
  if (!skip_past_char (str, '{'))
    {
      set_syntax_error (_("expected '{'"));
      return false;
    }

  /* The braced operand is a dual-indexed ZA tile slice.  */
  if (!parse_dual_indexed_reg (str, REG_TYPE_ZATHV, opnd, qualifier))
    return false;

  if (!skip_past_char (str, '}'))
    {
      set_syntax_error (_("expected '}'"));
      return false;
    }

  return true;
}
4480
4481 /* Parse list of up to eight 64-bit element tile names separated by commas in
4482 SME's ZERO instruction:
4483
4484 ZERO { <mask> }
4485
4486 Function returns <mask>:
4487
4488 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4489 */
4490 static int
4491 parse_sme_zero_mask(char **str)
4492 {
4493 char *q;
4494 int mask;
4495 aarch64_opnd_qualifier_t qualifier;
4496
4497 mask = 0x00;
4498 q = *str;
4499 do
4500 {
4501 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA_ZAT,
4502 &qualifier);
4503 if (!reg)
4504 return PARSE_FAIL;
4505
4506 if (reg->type == REG_TYPE_ZA)
4507 {
4508 if (qualifier != AARCH64_OPND_QLF_NIL)
4509 {
4510 set_syntax_error ("ZA should not have a size suffix");
4511 return PARSE_FAIL;
4512 }
4513 /* { ZA } is assembled as all-ones immediate. */
4514 mask = 0xff;
4515 }
4516 else
4517 {
4518 int regno = reg->number;
4519 if (qualifier == AARCH64_OPND_QLF_S_B)
4520 {
4521 /* { ZA0.B } is assembled as all-ones immediate. */
4522 mask = 0xff;
4523 }
4524 else if (qualifier == AARCH64_OPND_QLF_S_H)
4525 mask |= 0x55 << regno;
4526 else if (qualifier == AARCH64_OPND_QLF_S_S)
4527 mask |= 0x11 << regno;
4528 else if (qualifier == AARCH64_OPND_QLF_S_D)
4529 mask |= 0x01 << regno;
4530 else if (qualifier == AARCH64_OPND_QLF_S_Q)
4531 {
4532 set_syntax_error (_("ZA tile masks do not operate at .Q"
4533 " granularity"));
4534 return PARSE_FAIL;
4535 }
4536 else if (qualifier == AARCH64_OPND_QLF_NIL)
4537 {
4538 set_syntax_error (_("missing ZA tile size"));
4539 return PARSE_FAIL;
4540 }
4541 else
4542 {
4543 set_syntax_error (_("invalid ZA tile"));
4544 return PARSE_FAIL;
4545 }
4546 }
4547 }
4548 while (skip_past_char (&q, ','));
4549
4550 *str = q;
4551 return mask;
4552 }
4553
4554 /* Wraps in curly braces <mask> operand ZERO instruction:
4555
4556 ZERO { <mask> }
4557
4558 Function returns value of <mask> bit-field.
4559 */
4560 static int
4561 parse_sme_list_of_64bit_tiles (char **str)
4562 {
4563 int regno;
4564
4565 if (!skip_past_char (str, '{'))
4566 {
4567 set_syntax_error (_("expected '{'"));
4568 return PARSE_FAIL;
4569 }
4570
4571 /* Empty <mask> list is an all-zeros immediate. */
4572 if (!skip_past_char (str, '}'))
4573 {
4574 regno = parse_sme_zero_mask (str);
4575 if (regno == PARSE_FAIL)
4576 return PARSE_FAIL;
4577
4578 if (!skip_past_char (str, '}'))
4579 {
4580 set_syntax_error (_("expected '}'"));
4581 return PARSE_FAIL;
4582 }
4583 }
4584 else
4585 regno = 0x00;
4586
4587 return regno;
4588 }
4589
4590 /* Parse streaming mode operand for SMSTART and SMSTOP.
4591
4592 {SM | ZA}
4593
4594 Function returns 's' if SM or 'z' if ZM is parsed. Otherwise PARSE_FAIL.
4595 */
4596 static int
4597 parse_sme_sm_za (char **str)
4598 {
4599 char *p, *q;
4600
4601 p = q = *str;
4602 while (ISALPHA (*q))
4603 q++;
4604
4605 if ((q - p != 2)
4606 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4607 {
4608 set_syntax_error (_("expected SM or ZA operand"));
4609 return PARSE_FAIL;
4610 }
4611
4612 *str = q;
4613 return TOLOWER (p[0]);
4614 }
4615
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   On success *STR is advanced past the register name and, when FLAGS is
   non-NULL, *FLAGS receives the register's flag bits (0 for the
   implementation-defined S<...> form).  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, stopping at the
     first character that cannot be part of a register name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  /* NOTE(review): sscanf does not require the whole of BUF to be
	     consumed, so a name with trailing characters after the last
	     field (e.g. "s3_0_c1_c2_3x") would still be accepted here —
	     confirm whether that is intended.  */
	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the canonical encoding:
	     op0:2 op1:3 Cn:4 Cm:4 op2:3.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known register: diagnose use of names the selected processor
	 does not support, and deprecated names, but still return the
	 encoding so assembly can continue.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4689
4690 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4691 for the option, or NULL. */
4692
4693 static const aarch64_sys_ins_reg *
4694 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4695 {
4696 char *p, *q;
4697 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4698 const aarch64_sys_ins_reg *o;
4699
4700 p = buf;
4701 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4702 if (p < buf + (sizeof (buf) - 1))
4703 *p++ = TOLOWER (*q);
4704 *p = '\0';
4705
4706 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4707 valid system register. This is enforced by construction of the hash
4708 table. */
4709 if (p - buf != q - *str)
4710 return NULL;
4711
4712 o = str_hash_find (sys_ins_regs, buf);
4713 if (!o)
4714 return NULL;
4715
4716 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4717 o->name, o->value, o->flags, 0))
4718 as_bad (_("selected processor does not support system register "
4719 "name '%s'"), buf);
4720 if (aarch64_sys_reg_deprecated_p (o->flags))
4721 as_warn (_("system register name '%s' is deprecated and may be "
4722 "removed in a future release"), buf);
4723
4724 *str = q;
4725 return o;
4726 }
4727 \f
/* Convenience macros for the operand parsers.  They all assume a local
   variable STR holding the current input cursor and a local label
   "failure" to jump to on a parse error, so they may only be expanded
   inside functions that provide both (i.e. parse_operands).  */

/* Consume the single character CHR or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into the local REG or fail.  */
#define po_reg_or_fail(regtype) do {				\
    reg = aarch64_reg_parse (&str, regtype, NULL);		\
    if (!reg)							\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register, check it against REG_TYPE, and
   record its number and qualifier in the local INFO, or fail.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into the local VAL (no range check) or
   fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or fail with a fatal syntax error quoting the bounds.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse one of the enumeration strings in ARRAY into VAL or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR and fail when it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4779 \f
/* Encode the 12-bit immediate field of an add/sub immediate instruction;
   imm12 occupies bits [21:10].  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  return imm << 10;
}
4786
/* Encode the shift-amount field of an add/sub immediate instruction;
   the count lands at bit position 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  return cnt << 22;
}
4793
4794
/* Encode the immediate field of an ADR instruction: the low two bits go
   to immlo (insn bits [30:29]) and bits [20:2] go to immhi (insn bits
   [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;		  /* imm[1:0]  -> insn[30:29].  */
  uint32_t immhi = imm & (0x7ffff << 2);  /* imm[20:2] -> insn[23:5].  */

  return (immlo << 29) | (immhi << 3);
}
4802
/* Encode the 16-bit immediate field of a move-wide immediate
   instruction; imm16 starts at bit 5.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  return imm << 5;
}
4809
/* Encode the 26-bit offset of an unconditional branch; imm26 occupies
   bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x3ffffffu;
}
4816
/* Encode the 19-bit offset of a conditional branch or compare & branch;
   imm19 occupies bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffffu) << 5;
}
4823
/* Encode the 19-bit offset of a load-literal instruction; imm19
   occupies bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  uint32_t imm19 = ofs & ((1u << 19) - 1);

  return imm19 << 5;
}
4830
/* Encode the 14-bit offset of a test & branch instruction; imm14
   occupies bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fffu) << 5;
}
4837
/* Encode the 16-bit immediate field of SVC/HVC/SMC; imm16 occupies
   bits [20:5].  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  return imm << 5;
}
4844
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling the
   op bit (bit 30) of the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ (1u << 30);
}
4851
/* Turn a MOVN/MOVZ encoding into MOVZ by setting opcode bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | (1u << 30);
}
4857
/* Turn a MOVN/MOVZ encoding into MOVN by clearing opcode bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(1u << 30);
}
4863
4864 /* Overall per-instruction processing. */
4865
4866 /* We need to be able to fix up arbitrary expressions in some statements.
4867 This is so that we can handle symbols that are an arbitrary distance from
4868 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4869 which returns part of an address in a form which will be valid for
4870 a data instruction. We do this by pushing the expression into a symbol
4871 in the expr_section, and creating a fix for that. */
4872
4873 static fixS *
4874 fix_new_aarch64 (fragS * frag,
4875 int where,
4876 short int size,
4877 expressionS * exp,
4878 int pc_rel,
4879 int reloc)
4880 {
4881 fixS *new_fix;
4882
4883 switch (exp->X_op)
4884 {
4885 case O_constant:
4886 case O_symbol:
4887 case O_add:
4888 case O_subtract:
4889 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4890 break;
4891
4892 default:
4893 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4894 pc_rel, reloc);
4895 break;
4896 }
4897 return new_fix;
4898 }
4899 \f
4900 /* Diagnostics on operands errors. */
4901
/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.
   When non-zero, operand-mismatch diagnostics also print a corrected
   "did you mean this?" suggestion and the other valid variants.  */
static int verbose_error_p = 1;
4905
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.

   Human-readable names for the operand error kinds.  The array is
   indexed by enum aarch64_operand_error_kind, so the entries must be in
   exactly the same order as the enum declaration (see
   include/opcode/aarch64.h).  The AARCH64_OPDE_UNTIED_* entries were
   previously missing, which made DEBUG_TRACE print the wrong name for
   every kind from AARCH64_OPDE_OUT_OF_RANGE onwards.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4923
4924 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4925
4926 When multiple errors of different kinds are found in the same assembly
4927 line, only the error of the highest severity will be picked up for
4928 issuing the diagnostics. */
4929
4930 static inline bool
4931 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
4932 enum aarch64_operand_error_kind rhs)
4933 {
4934 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
4935 gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
4936 gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
4937 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
4938 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
4939 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
4940 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
4941 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
4942 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
4943 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
4944 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
4945 return lhs > rhs;
4946 }
4947
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.
     (The buffer is 32 bytes; byte 31 is reserved for the terminator.)  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4976
4977 static void
4978 reset_aarch64_instruction (aarch64_instruction *instruction)
4979 {
4980 memset (instruction, '\0', sizeof (aarch64_instruction));
4981 instruction->reloc.type = BFD_RELOC_UNUSED;
4982 }
4983
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error: the opcode template being tried when the
   error was found plus the error details, chained into a singly-linked
   list.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode template the error is for.  */
  aarch64_operand_error detail;		/* Kind, operand index, message.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Head and tail of the list of recorded operand errors for the current
   assembly line.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
5003
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines to avoid repeated
   allocation (see init_operand_error_report / add_operand_error_record).  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5015
5016 /* Initialize the data structure that stores the operand mismatch
5017 information on assembling one line of the assembly code. */
5018 static void
5019 init_operand_error_report (void)
5020 {
5021 if (operand_error_report.head != NULL)
5022 {
5023 gas_assert (operand_error_report.tail != NULL);
5024 operand_error_report.tail->next = free_opnd_error_record_nodes;
5025 free_opnd_error_record_nodes = operand_error_report.head;
5026 operand_error_report.head = NULL;
5027 operand_error_report.tail = NULL;
5028 return;
5029 }
5030 gas_assert (operand_error_report.tail == NULL);
5031 }
5032
/* Return TRUE if some operand error has been recorded during the
   parsing of the current assembly line using the opcode *OPCODE;
   otherwise return FALSE.  */
static inline bool
opcode_has_operand_error_p (const aarch64_opcode *opcode)
{
  /* Records are inserted at the head, one per opcode, so only the head
     can belong to OPCODE (see add_operand_error_record).  */
  operand_error_record *record = operand_error_report.head;
  return record && record->opcode == opcode;
}
5042
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* Note: RECORD initially aliases the list head; when a record for
     OPCODE already exists it IS the head (records are inserted at the
     head and there is at most one per opcode).  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Reuse a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Either a freshly prepared record, or an existing one whose stored
     error should be replaced by the new one.  */
  record->detail = new_record->detail;
}
5094
5095 static inline void
5096 record_operand_error_info (const aarch64_opcode *opcode,
5097 aarch64_operand_error *error_info)
5098 {
5099 operand_error_record record;
5100 record.opcode = opcode;
5101 record.detail = *error_info;
5102 add_operand_error_record (&record);
5103 }
5104
5105 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5106 error message *ERROR, for operand IDX (count from 0). */
5107
5108 static void
5109 record_operand_error (const aarch64_opcode *opcode, int idx,
5110 enum aarch64_operand_error_kind kind,
5111 const char* error)
5112 {
5113 aarch64_operand_error info;
5114 memset(&info, 0, sizeof (info));
5115 info.index = idx;
5116 info.kind = kind;
5117 info.error = error;
5118 info.non_fatal = false;
5119 record_operand_error_info (opcode, &info);
5120 }
5121
5122 static void
5123 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5124 enum aarch64_operand_error_kind kind,
5125 const char* error, const int *extra_data)
5126 {
5127 aarch64_operand_error info;
5128 info.index = idx;
5129 info.kind = kind;
5130 info.error = error;
5131 info.data[0].i = extra_data[0];
5132 info.data[1].i = extra_data[1];
5133 info.data[2].i = extra_data[2];
5134 info.non_fatal = false;
5135 record_operand_error_info (opcode, &info);
5136 }
5137
5138 static void
5139 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5140 const char* error, int lower_bound,
5141 int upper_bound)
5142 {
5143 int data[3] = {lower_bound, upper_bound, 0};
5144 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5145 error, data);
5146 }
5147
5148 /* Remove the operand error record for *OPCODE. */
5149 static void ATTRIBUTE_UNUSED
5150 remove_operand_error_record (const aarch64_opcode *opcode)
5151 {
5152 if (opcode_has_operand_error_p (opcode))
5153 {
5154 operand_error_record* record = operand_error_report.head;
5155 gas_assert (record != NULL && operand_error_report.tail != NULL);
5156 operand_error_report.head = record->next;
5157 record->next = free_opnd_error_record_nodes;
5158 free_opnd_error_record_nodes = record;
5159 if (operand_error_report.head == NULL)
5160 {
5161 gas_assert (operand_error_report.tail == record);
5162 operand_error_report.tail = NULL;
5163 }
5164 }
5165 }
5166
5167 /* Given the instruction in *INSTR, return the index of the best matched
5168 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5169
5170 Return -1 if there is no qualifier sequence; return the first match
5171 if there is multiple matches found. */
5172
5173 static int
5174 find_best_match (const aarch64_inst *instr,
5175 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
5176 {
5177 int i, num_opnds, max_num_matched, idx;
5178
5179 num_opnds = aarch64_num_of_operands (instr->opcode);
5180 if (num_opnds == 0)
5181 {
5182 DEBUG_TRACE ("no operand");
5183 return -1;
5184 }
5185
5186 max_num_matched = 0;
5187 idx = 0;
5188
5189 /* For each pattern. */
5190 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5191 {
5192 int j, num_matched;
5193 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
5194
5195 /* Most opcodes has much fewer patterns in the list. */
5196 if (empty_qualifier_sequence_p (qualifiers))
5197 {
5198 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
5199 break;
5200 }
5201
5202 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
5203 if (*qualifiers == instr->operands[j].qualifier)
5204 ++num_matched;
5205
5206 if (num_matched > max_num_matched)
5207 {
5208 max_num_matched = num_matched;
5209 idx = i;
5210 }
5211 }
5212
5213 DEBUG_TRACE ("return with %d", idx);
5214 return idx;
5215 }
5216
5217 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5218 corresponding operands in *INSTR. */
5219
5220 static inline void
5221 assign_qualifier_sequence (aarch64_inst *instr,
5222 const aarch64_opnd_qualifier_t *qualifiers)
5223 {
5224 int i = 0;
5225 int num_opnds = aarch64_num_of_operands (instr->opcode);
5226 gas_assert (num_opnds);
5227 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5228 instr->operands[i].qualifier = *qualifiers;
5229 }
5230
5231 /* Callback used by aarch64_print_operand to apply STYLE to the
5232 disassembler output created from FMT and ARGS. The STYLER object holds
5233 any required state. Must return a pointer to a string (created from FMT
5234 and ARGS) that will continue to be valid until the complete disassembled
5235 instruction has been printed.
5236
5237 We don't currently add any styling to the output of the disassembler as
5238 used within assembler error messages, and so STYLE is ignored here. A
5239 new string is allocated on the obstack help within STYLER and returned
5240 to the caller. */
5241
5242 static const char *aarch64_apply_style
5243 (struct aarch64_styler *styler,
5244 enum disassembler_style style ATTRIBUTE_UNUSED,
5245 const char *fmt, va_list args)
5246 {
5247 int res;
5248 char *ptr;
5249 struct obstack *stack = (struct obstack *) styler->state;
5250 va_list ap;
5251
5252 /* Calculate the required space. */
5253 va_copy (ap, args);
5254 res = vsnprintf (NULL, 0, fmt, ap);
5255 va_end (ap);
5256 gas_assert (res >= 0);
5257
5258 /* Allocate space on the obstack and format the result. */
5259 ptr = (char *) obstack_alloc (stack, res + 1);
5260 res = vsnprintf (ptr, (res + 1), fmt, args);
5261 gas_assert (res >= 0);
5262
5263 return ptr;
5264 }
5265
/* Print operands for the diagnosis purpose.

   Appends the textual form of OPNDS (per the OPCODE template) to BUF,
   which must already contain the mnemonic and have room for the result.
   A styling obstack is set up locally so aarch64_print_operand can
   allocate styled fragments; it is freed before returning.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  obstack_free (&content, NULL);
}
5317
/* Send an informational message (printf-style FORMAT) to stderr,
   prefixed with the current input file name and, when known, the line
   number.  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  file = as_where (&line);
  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
5341
/* Output one operand error record.

   Print the diagnostic described by RECORD for the assembly line STR.
   Non-fatal errors are issued with as_warn, everything else with as_bad.
   For AARCH64_OPDE_INVALID_VARIANT in verbose mode, a corrected
   "did you mean this?" suggestion and the other valid qualifier
   variants are also printed.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* Operand code of the offending operand, or NIL when the error is not
     tied to a particular operand (idx < 0).  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_(" did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_(" %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_(" other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_(" %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the inclusive bounds (see
	 record_operand_out_of_range_error).  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0].i == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5537
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5635 \f
/* Store the 32-bit instruction word INSN at BUF in little-endian byte
   order, regardless of host endianness.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  unsigned int i;

  /* Least-significant byte first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
5646
/* Read a 32-bit little-endian instruction word from BUF and return it
   as a host-order value.  Inverse of put_aarch64_insn.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;

  return ((uint32_t) p[0]
	  | ((uint32_t) p[1] << 8)
	  | ((uint32_t) p[2] << 16)
	  | ((uint32_t) p[3] << 24));
}
5656
5657 static void
5658 output_inst (struct aarch64_inst *new_inst)
5659 {
5660 char *to = NULL;
5661
5662 to = frag_more (INSN_SIZE);
5663
5664 frag_now->tc_frag_data.recorded = 1;
5665
5666 put_aarch64_insn (to, inst.base.value);
5667
5668 if (inst.reloc.type != BFD_RELOC_UNUSED)
5669 {
5670 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
5671 INSN_SIZE, &inst.reloc.exp,
5672 inst.reloc.pc_rel,
5673 inst.reloc.type);
5674 DEBUG_TRACE ("Prepared relocation fix up");
5675 /* Don't check the addend value against the instruction size,
5676 that's the job of our code in md_apply_fix(). */
5677 fixp->fx_no_overflow = 1;
5678 if (new_inst != NULL)
5679 fixp->tc_fix_data.inst = new_inst;
5680 if (aarch64_gas_internal_fixup_p ())
5681 {
5682 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
5683 fixp->tc_fix_data.opnd = inst.reloc.opnd;
5684 fixp->fx_addnumber = inst.reloc.flags;
5685 }
5686 }
5687
5688 dwarf2_emit_insn (INSN_SIZE);
5689 }
5690
/* Link together opcodes of the same name.  */

/* Node in the singly-linked list of opcode templates that share one
   mnemonic; a list of these is the value stored in aarch64_ops_hsh.  */
struct templates
{
  /* One candidate opcode entry for the mnemonic.  */
  const aarch64_opcode *opcode;
  /* Next template with the same mnemonic, or NULL.  */
  struct templates *next;
};

typedef struct templates templates;
5700
5701 static templates *
5702 lookup_mnemonic (const char *start, int len)
5703 {
5704 templates *templ = NULL;
5705
5706 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5707 return templ;
5708 }
5709
5710 /* Subroutine of md_assemble, responsible for looking up the primary
5711 opcode from the mnemonic the user wrote. BASE points to the beginning
5712 of the mnemonic, DOT points to the first '.' within the mnemonic
5713 (if any) and END points to the end of the mnemonic. */
5714
5715 static templates *
5716 opcode_lookup (char *base, char *dot, char *end)
5717 {
5718 const aarch64_cond *cond;
5719 char condname[16];
5720 int len;
5721
5722 if (dot == end)
5723 return 0;
5724
5725 inst.cond = COND_ALWAYS;
5726
5727 /* Handle a possible condition. */
5728 if (dot)
5729 {
5730 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5731 if (!cond)
5732 return 0;
5733 inst.cond = cond->value;
5734 len = dot - base;
5735 }
5736 else
5737 len = end - base;
5738
5739 if (inst.cond == COND_ALWAYS)
5740 {
5741 /* Look for unaffixed mnemonic. */
5742 return lookup_mnemonic (base, len);
5743 }
5744 else if (len <= 13)
5745 {
5746 /* append ".c" to mnemonic if conditional */
5747 memcpy (condname, base, len);
5748 memcpy (condname + len, ".c", 2);
5749 base = condname;
5750 len += 2;
5751 return lookup_mnemonic (base, len);
5752 }
5753
5754 return NULL;
5755 }
5756
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* The single per-opcode default encoded in the opcode table.  How it is
     interpreted depends on TYPE below.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* General, FP and SIMD register operands: the default is a register
       number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element operands: the default is the register number; the
       lane index is left as-is.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Plain immediate operands: the default is the immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* An omitted scaled SVE pattern behaves as "<pattern>, MUL #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate means no relocation is pending.
       Note this clears global state (inst.reloc), not *OPERAND.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Barrier and BTI operands: the default indexes the relevant
       option table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5855
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* Whether the destination is a 32-bit (W) register.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* Some relocation types are meaningless on MOVK, which only inserts
     16 bits; reject them up front.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Derive the implicit LSL amount of the instruction from which 16-bit
     group (G0..G3) the relocation selects.  */
  switch (inst.reloc.type)
    {
    /* G0 relocations: bits [15:0], no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1 relocations: bits [31:16].  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2 relocations: bits [47:32]; only valid on X registers.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* G3 relocations: bits [63:48]; only valid on X registers.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
5957
/* A primitive log calculator.  Return log2 (SIZE) for SIZE in
   {1, 2, 4, 8, 16}; any other value triggers an assertion failure.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* ls[SIZE - 1] is log2 (SIZE); 0xff flags sizes that are not a
     supported power of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as SIZE > 16: without the lower-bound
     check, ls[size - 1] would read out of bounds for SIZE == 0.  */
  if (size < 1 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5973
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows are indexed by pseudo reloc type (in the same order as the
     pseudo types are defined, see the comment at the bottom); columns
     by log2 of the access size (8/16/32/64/128 bits).  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand has no qualifier yet, infer it from the
     first operand's qualifier.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS rows above have no 128-bit entry.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6061
/* Check whether a register list REGINFO is valid.  The registers must be
   numbered in increasing order (modulo 32), in increments of one or two.

   If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
   increments of two.

   REGINFO encodes the count minus one in bits [1:0] and each register
   number in successive 5-bit fields starting at bit 2.

   Return FALSE if such a register list is invalid, otherwise return TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t nb_regs = (reginfo & 0x3) + 1;
  uint32_t stride = accept_alternate ? 2 : 1;
  uint32_t expected, i;

  reginfo >>= 2;
  expected = reginfo & 0x1f;

  for (i = 1; i < nb_regs; i++)
    {
      reginfo >>= 5;
      expected = (expected + stride) & 0x1f;
      if ((reginfo & 0x1f) != expected)
	return false;
    }

  return true;
}
6092
6093 /* Generic instruction operand parser. This does no encoding and no
6094 semantic validation; it merely squirrels values away in the inst
6095 structure. Returns TRUE or FALSE depending on whether the
6096 specified grammar matched. */
6097
6098 static bool
6099 parse_operands (char *str, const aarch64_opcode *opcode)
6100 {
6101 int i;
6102 char *backtrack_pos = 0;
6103 const enum aarch64_opnd *operands = opcode->operands;
6104 aarch64_reg_type imm_reg_type;
6105
6106 clear_error ();
6107 skip_whitespace (str);
6108
6109 if (AARCH64_CPU_HAS_ANY_FEATURES (*opcode->avariant,
6110 AARCH64_FEATURE_SVE
6111 | AARCH64_FEATURE_SVE2))
6112 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6113 else
6114 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6115
6116 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6117 {
6118 int64_t val;
6119 const reg_entry *reg;
6120 int comma_skipped_p = 0;
6121 struct vector_type_el vectype;
6122 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6123 aarch64_opnd_info *info = &inst.base.operands[i];
6124 aarch64_reg_type reg_type;
6125
6126 DEBUG_TRACE ("parse operand %d", i);
6127
6128 /* Assign the operand code. */
6129 info->type = operands[i];
6130
6131 if (optional_operand_p (opcode, i))
6132 {
6133 /* Remember where we are in case we need to backtrack. */
6134 gas_assert (!backtrack_pos);
6135 backtrack_pos = str;
6136 }
6137
6138 /* Expect comma between operands; the backtrack mechanism will take
6139 care of cases of omitted optional operand. */
6140 if (i > 0 && ! skip_past_char (&str, ','))
6141 {
6142 set_syntax_error (_("comma expected between operands"));
6143 goto failure;
6144 }
6145 else
6146 comma_skipped_p = 1;
6147
6148 switch (operands[i])
6149 {
6150 case AARCH64_OPND_Rd:
6151 case AARCH64_OPND_Rn:
6152 case AARCH64_OPND_Rm:
6153 case AARCH64_OPND_Rt:
6154 case AARCH64_OPND_Rt2:
6155 case AARCH64_OPND_Rs:
6156 case AARCH64_OPND_Ra:
6157 case AARCH64_OPND_Rt_LS64:
6158 case AARCH64_OPND_Rt_SYS:
6159 case AARCH64_OPND_PAIRREG:
6160 case AARCH64_OPND_SVE_Rm:
6161 po_int_reg_or_fail (REG_TYPE_R_Z);
6162
6163 /* In LS64 load/store instructions Rt register number must be even
6164 and <=22. */
6165 if (operands[i] == AARCH64_OPND_Rt_LS64)
6166 {
6167 /* We've already checked if this is valid register.
6168 This will check if register number (Rt) is not undefined for LS64
6169 instructions:
6170 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6171 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6172 {
6173 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6174 goto failure;
6175 }
6176 }
6177 break;
6178
6179 case AARCH64_OPND_Rd_SP:
6180 case AARCH64_OPND_Rn_SP:
6181 case AARCH64_OPND_Rt_SP:
6182 case AARCH64_OPND_SVE_Rn_SP:
6183 case AARCH64_OPND_Rm_SP:
6184 po_int_reg_or_fail (REG_TYPE_R_SP);
6185 break;
6186
6187 case AARCH64_OPND_Rm_EXT:
6188 case AARCH64_OPND_Rm_SFT:
6189 po_misc_or_fail (parse_shifter_operand
6190 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6191 ? SHIFTED_ARITH_IMM
6192 : SHIFTED_LOGIC_IMM)));
6193 if (!info->shifter.operator_present)
6194 {
6195 /* Default to LSL if not present. Libopcodes prefers shifter
6196 kind to be explicit. */
6197 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6198 info->shifter.kind = AARCH64_MOD_LSL;
6199 /* For Rm_EXT, libopcodes will carry out further check on whether
6200 or not stack pointer is used in the instruction (Recall that
6201 "the extend operator is not optional unless at least one of
6202 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6203 }
6204 break;
6205
6206 case AARCH64_OPND_Fd:
6207 case AARCH64_OPND_Fn:
6208 case AARCH64_OPND_Fm:
6209 case AARCH64_OPND_Fa:
6210 case AARCH64_OPND_Ft:
6211 case AARCH64_OPND_Ft2:
6212 case AARCH64_OPND_Sd:
6213 case AARCH64_OPND_Sn:
6214 case AARCH64_OPND_Sm:
6215 case AARCH64_OPND_SVE_VZn:
6216 case AARCH64_OPND_SVE_Vd:
6217 case AARCH64_OPND_SVE_Vm:
6218 case AARCH64_OPND_SVE_Vn:
6219 reg = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, NULL);
6220 if (!reg)
6221 {
6222 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6223 goto failure;
6224 }
6225 gas_assert (reg->type >= REG_TYPE_FP_B
6226 && reg->type <= REG_TYPE_FP_Q);
6227
6228 info->reg.regno = reg->number;
6229 info->qualifier = AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);
6230 break;
6231
6232 case AARCH64_OPND_SVE_Pd:
6233 case AARCH64_OPND_SVE_Pg3:
6234 case AARCH64_OPND_SVE_Pg4_5:
6235 case AARCH64_OPND_SVE_Pg4_10:
6236 case AARCH64_OPND_SVE_Pg4_16:
6237 case AARCH64_OPND_SVE_Pm:
6238 case AARCH64_OPND_SVE_Pn:
6239 case AARCH64_OPND_SVE_Pt:
6240 case AARCH64_OPND_SME_Pm:
6241 reg_type = REG_TYPE_PN;
6242 goto vector_reg;
6243
6244 case AARCH64_OPND_SVE_Za_5:
6245 case AARCH64_OPND_SVE_Za_16:
6246 case AARCH64_OPND_SVE_Zd:
6247 case AARCH64_OPND_SVE_Zm_5:
6248 case AARCH64_OPND_SVE_Zm_16:
6249 case AARCH64_OPND_SVE_Zn:
6250 case AARCH64_OPND_SVE_Zt:
6251 reg_type = REG_TYPE_ZN;
6252 goto vector_reg;
6253
6254 case AARCH64_OPND_Va:
6255 case AARCH64_OPND_Vd:
6256 case AARCH64_OPND_Vn:
6257 case AARCH64_OPND_Vm:
6258 reg_type = REG_TYPE_VN;
6259 vector_reg:
6260 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6261 if (!reg)
6262 {
6263 first_error (_(get_reg_expected_msg (reg_type)));
6264 goto failure;
6265 }
6266 if (vectype.defined & NTA_HASINDEX)
6267 goto failure;
6268
6269 info->reg.regno = reg->number;
6270 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6271 && vectype.type == NT_invtype)
6272 /* Unqualified Pn and Zn registers are allowed in certain
6273 contexts. Rely on F_STRICT qualifier checking to catch
6274 invalid uses. */
6275 info->qualifier = AARCH64_OPND_QLF_NIL;
6276 else
6277 {
6278 info->qualifier = vectype_to_qualifier (&vectype);
6279 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6280 goto failure;
6281 }
6282 break;
6283
6284 case AARCH64_OPND_VdD1:
6285 case AARCH64_OPND_VnD1:
6286 reg = aarch64_reg_parse (&str, REG_TYPE_VN, &vectype);
6287 if (!reg)
6288 {
6289 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6290 goto failure;
6291 }
6292 if (vectype.type != NT_d || vectype.index != 1)
6293 {
6294 set_fatal_syntax_error
6295 (_("the top half of a 128-bit FP/SIMD register is expected"));
6296 goto failure;
6297 }
6298 info->reg.regno = reg->number;
6299 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6300 here; it is correct for the purpose of encoding/decoding since
6301 only the register number is explicitly encoded in the related
6302 instructions, although this appears a bit hacky. */
6303 info->qualifier = AARCH64_OPND_QLF_S_D;
6304 break;
6305
6306 case AARCH64_OPND_SVE_Zm3_INDEX:
6307 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6308 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6309 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6310 case AARCH64_OPND_SVE_Zm4_INDEX:
6311 case AARCH64_OPND_SVE_Zn_INDEX:
6312 reg_type = REG_TYPE_ZN;
6313 goto vector_reg_index;
6314
6315 case AARCH64_OPND_Ed:
6316 case AARCH64_OPND_En:
6317 case AARCH64_OPND_Em:
6318 case AARCH64_OPND_Em16:
6319 case AARCH64_OPND_SM3_IMM2:
6320 reg_type = REG_TYPE_VN;
6321 vector_reg_index:
6322 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6323 if (!reg)
6324 {
6325 first_error (_(get_reg_expected_msg (reg_type)));
6326 goto failure;
6327 }
6328 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6329 goto failure;
6330
6331 info->reglane.regno = reg->number;
6332 info->reglane.index = vectype.index;
6333 info->qualifier = vectype_to_qualifier (&vectype);
6334 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6335 goto failure;
6336 break;
6337
6338 case AARCH64_OPND_SVE_ZnxN:
6339 case AARCH64_OPND_SVE_ZtxN:
6340 reg_type = REG_TYPE_ZN;
6341 goto vector_reg_list;
6342
6343 case AARCH64_OPND_LVn:
6344 case AARCH64_OPND_LVt:
6345 case AARCH64_OPND_LVt_AL:
6346 case AARCH64_OPND_LEt:
6347 reg_type = REG_TYPE_VN;
6348 vector_reg_list:
6349 if (reg_type == REG_TYPE_ZN
6350 && get_opcode_dependent_value (opcode) == 1
6351 && *str != '{')
6352 {
6353 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6354 if (!reg)
6355 {
6356 first_error (_(get_reg_expected_msg (reg_type)));
6357 goto failure;
6358 }
6359 info->reglist.first_regno = reg->number;
6360 info->reglist.num_regs = 1;
6361 }
6362 else
6363 {
6364 val = parse_vector_reg_list (&str, reg_type, &vectype);
6365 if (val == PARSE_FAIL)
6366 goto failure;
6367
6368 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6369 {
6370 set_fatal_syntax_error (_("invalid register list"));
6371 goto failure;
6372 }
6373
6374 if (vectype.width != 0 && *str != ',')
6375 {
6376 set_fatal_syntax_error
6377 (_("expected element type rather than vector type"));
6378 goto failure;
6379 }
6380
6381 info->reglist.first_regno = (val >> 2) & 0x1f;
6382 info->reglist.num_regs = (val & 0x3) + 1;
6383 }
6384 if (operands[i] == AARCH64_OPND_LEt)
6385 {
6386 if (!(vectype.defined & NTA_HASINDEX))
6387 goto failure;
6388 info->reglist.has_index = 1;
6389 info->reglist.index = vectype.index;
6390 }
6391 else
6392 {
6393 if (vectype.defined & NTA_HASINDEX)
6394 goto failure;
6395 if (!(vectype.defined & NTA_HASTYPE))
6396 {
6397 if (reg_type == REG_TYPE_ZN)
6398 set_fatal_syntax_error (_("missing type suffix"));
6399 goto failure;
6400 }
6401 }
6402 info->qualifier = vectype_to_qualifier (&vectype);
6403 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6404 goto failure;
6405 break;
6406
6407 case AARCH64_OPND_CRn:
6408 case AARCH64_OPND_CRm:
6409 {
6410 char prefix = *(str++);
6411 if (prefix != 'c' && prefix != 'C')
6412 goto failure;
6413
6414 po_imm_nc_or_fail ();
6415 if (val > 15)
6416 {
6417 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6418 goto failure;
6419 }
6420 info->qualifier = AARCH64_OPND_QLF_CR;
6421 info->imm.value = val;
6422 break;
6423 }
6424
6425 case AARCH64_OPND_SHLL_IMM:
6426 case AARCH64_OPND_IMM_VLSR:
6427 po_imm_or_fail (1, 64);
6428 info->imm.value = val;
6429 break;
6430
6431 case AARCH64_OPND_CCMP_IMM:
6432 case AARCH64_OPND_SIMM5:
6433 case AARCH64_OPND_FBITS:
6434 case AARCH64_OPND_TME_UIMM16:
6435 case AARCH64_OPND_UIMM4:
6436 case AARCH64_OPND_UIMM4_ADDG:
6437 case AARCH64_OPND_UIMM10:
6438 case AARCH64_OPND_UIMM3_OP1:
6439 case AARCH64_OPND_UIMM3_OP2:
6440 case AARCH64_OPND_IMM_VLSL:
6441 case AARCH64_OPND_IMM:
6442 case AARCH64_OPND_IMM_2:
6443 case AARCH64_OPND_WIDTH:
6444 case AARCH64_OPND_SVE_INV_LIMM:
6445 case AARCH64_OPND_SVE_LIMM:
6446 case AARCH64_OPND_SVE_LIMM_MOV:
6447 case AARCH64_OPND_SVE_SHLIMM_PRED:
6448 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6449 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6450 case AARCH64_OPND_SVE_SHRIMM_PRED:
6451 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6452 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6453 case AARCH64_OPND_SVE_SIMM5:
6454 case AARCH64_OPND_SVE_SIMM5B:
6455 case AARCH64_OPND_SVE_SIMM6:
6456 case AARCH64_OPND_SVE_SIMM8:
6457 case AARCH64_OPND_SVE_UIMM3:
6458 case AARCH64_OPND_SVE_UIMM7:
6459 case AARCH64_OPND_SVE_UIMM8:
6460 case AARCH64_OPND_SVE_UIMM8_53:
6461 case AARCH64_OPND_IMM_ROT1:
6462 case AARCH64_OPND_IMM_ROT2:
6463 case AARCH64_OPND_IMM_ROT3:
6464 case AARCH64_OPND_SVE_IMM_ROT1:
6465 case AARCH64_OPND_SVE_IMM_ROT2:
6466 case AARCH64_OPND_SVE_IMM_ROT3:
6467 case AARCH64_OPND_CSSC_SIMM8:
6468 case AARCH64_OPND_CSSC_UIMM8:
6469 po_imm_nc_or_fail ();
6470 info->imm.value = val;
6471 break;
6472
6473 case AARCH64_OPND_SVE_AIMM:
6474 case AARCH64_OPND_SVE_ASIMM:
6475 po_imm_nc_or_fail ();
6476 info->imm.value = val;
6477 skip_whitespace (str);
6478 if (skip_past_comma (&str))
6479 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6480 else
6481 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6482 break;
6483
6484 case AARCH64_OPND_SVE_PATTERN:
6485 po_enum_or_fail (aarch64_sve_pattern_array);
6486 info->imm.value = val;
6487 break;
6488
6489 case AARCH64_OPND_SVE_PATTERN_SCALED:
6490 po_enum_or_fail (aarch64_sve_pattern_array);
6491 info->imm.value = val;
6492 if (skip_past_comma (&str)
6493 && !parse_shift (&str, info, SHIFTED_MUL))
6494 goto failure;
6495 if (!info->shifter.operator_present)
6496 {
6497 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6498 info->shifter.kind = AARCH64_MOD_MUL;
6499 info->shifter.amount = 1;
6500 }
6501 break;
6502
6503 case AARCH64_OPND_SVE_PRFOP:
6504 po_enum_or_fail (aarch64_sve_prfop_array);
6505 info->imm.value = val;
6506 break;
6507
6508 case AARCH64_OPND_UIMM7:
6509 po_imm_or_fail (0, 127);
6510 info->imm.value = val;
6511 break;
6512
6513 case AARCH64_OPND_IDX:
6514 case AARCH64_OPND_MASK:
6515 case AARCH64_OPND_BIT_NUM:
6516 case AARCH64_OPND_IMMR:
6517 case AARCH64_OPND_IMMS:
6518 po_imm_or_fail (0, 63);
6519 info->imm.value = val;
6520 break;
6521
6522 case AARCH64_OPND_IMM0:
6523 po_imm_nc_or_fail ();
6524 if (val != 0)
6525 {
6526 set_fatal_syntax_error (_("immediate zero expected"));
6527 goto failure;
6528 }
6529 info->imm.value = 0;
6530 break;
6531
6532 case AARCH64_OPND_FPIMM0:
6533 {
6534 int qfloat;
6535 bool res1 = false, res2 = false;
6536 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6537 it is probably not worth the effort to support it. */
6538 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6539 imm_reg_type))
6540 && (error_p ()
6541 || !(res2 = parse_constant_immediate (&str, &val,
6542 imm_reg_type))))
6543 goto failure;
6544 if ((res1 && qfloat == 0) || (res2 && val == 0))
6545 {
6546 info->imm.value = 0;
6547 info->imm.is_fp = 1;
6548 break;
6549 }
6550 set_fatal_syntax_error (_("immediate zero expected"));
6551 goto failure;
6552 }
6553
6554 case AARCH64_OPND_IMM_MOV:
6555 {
6556 char *saved = str;
6557 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6558 reg_name_p (str, REG_TYPE_VN))
6559 goto failure;
6560 str = saved;
6561 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6562 GE_OPT_PREFIX, REJECT_ABSENT));
6563 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6564 later. fix_mov_imm_insn will try to determine a machine
6565 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6566 message if the immediate cannot be moved by a single
6567 instruction. */
6568 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6569 inst.base.operands[i].skip = 1;
6570 }
6571 break;
6572
6573 case AARCH64_OPND_SIMD_IMM:
6574 case AARCH64_OPND_SIMD_IMM_SFT:
6575 if (! parse_big_immediate (&str, &val, imm_reg_type))
6576 goto failure;
6577 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6578 /* addr_off_p */ 0,
6579 /* need_libopcodes_p */ 1,
6580 /* skip_p */ 1);
6581 /* Parse shift.
6582 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6583 shift, we don't check it here; we leave the checking to
6584 the libopcodes (operand_general_constraint_met_p). By
6585 doing this, we achieve better diagnostics. */
6586 if (skip_past_comma (&str)
6587 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6588 goto failure;
6589 if (!info->shifter.operator_present
6590 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6591 {
6592 /* Default to LSL if not present. Libopcodes prefers shifter
6593 kind to be explicit. */
6594 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6595 info->shifter.kind = AARCH64_MOD_LSL;
6596 }
6597 break;
6598
6599 case AARCH64_OPND_FPIMM:
6600 case AARCH64_OPND_SIMD_FPIMM:
6601 case AARCH64_OPND_SVE_FPIMM8:
6602 {
6603 int qfloat;
6604 bool dp_p;
6605
6606 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6607 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6608 || !aarch64_imm_float_p (qfloat))
6609 {
6610 if (!error_p ())
6611 set_fatal_syntax_error (_("invalid floating-point"
6612 " constant"));
6613 goto failure;
6614 }
6615 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6616 inst.base.operands[i].imm.is_fp = 1;
6617 }
6618 break;
6619
6620 case AARCH64_OPND_SVE_I1_HALF_ONE:
6621 case AARCH64_OPND_SVE_I1_HALF_TWO:
6622 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6623 {
6624 int qfloat;
6625 bool dp_p;
6626
6627 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6628 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6629 {
6630 if (!error_p ())
6631 set_fatal_syntax_error (_("invalid floating-point"
6632 " constant"));
6633 goto failure;
6634 }
6635 inst.base.operands[i].imm.value = qfloat;
6636 inst.base.operands[i].imm.is_fp = 1;
6637 }
6638 break;
6639
6640 case AARCH64_OPND_LIMM:
6641 po_misc_or_fail (parse_shifter_operand (&str, info,
6642 SHIFTED_LOGIC_IMM));
6643 if (info->shifter.operator_present)
6644 {
6645 set_fatal_syntax_error
6646 (_("shift not allowed for bitmask immediate"));
6647 goto failure;
6648 }
6649 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6650 /* addr_off_p */ 0,
6651 /* need_libopcodes_p */ 1,
6652 /* skip_p */ 1);
6653 break;
6654
6655 case AARCH64_OPND_AIMM:
6656 if (opcode->op == OP_ADD)
6657 /* ADD may have relocation types. */
6658 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6659 SHIFTED_ARITH_IMM));
6660 else
6661 po_misc_or_fail (parse_shifter_operand (&str, info,
6662 SHIFTED_ARITH_IMM));
6663 switch (inst.reloc.type)
6664 {
6665 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6666 info->shifter.amount = 12;
6667 break;
6668 case BFD_RELOC_UNUSED:
6669 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6670 if (info->shifter.kind != AARCH64_MOD_NONE)
6671 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6672 inst.reloc.pc_rel = 0;
6673 break;
6674 default:
6675 break;
6676 }
6677 info->imm.value = 0;
6678 if (!info->shifter.operator_present)
6679 {
6680 /* Default to LSL if not present. Libopcodes prefers shifter
6681 kind to be explicit. */
6682 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6683 info->shifter.kind = AARCH64_MOD_LSL;
6684 }
6685 break;
6686
6687 case AARCH64_OPND_HALF:
6688 {
6689 /* #<imm16> or relocation. */
6690 int internal_fixup_p;
6691 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6692 if (internal_fixup_p)
6693 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6694 skip_whitespace (str);
6695 if (skip_past_comma (&str))
6696 {
6697 /* {, LSL #<shift>} */
6698 if (! aarch64_gas_internal_fixup_p ())
6699 {
6700 set_fatal_syntax_error (_("can't mix relocation modifier "
6701 "with explicit shift"));
6702 goto failure;
6703 }
6704 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6705 }
6706 else
6707 inst.base.operands[i].shifter.amount = 0;
6708 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6709 inst.base.operands[i].imm.value = 0;
6710 if (! process_movw_reloc_info ())
6711 goto failure;
6712 }
6713 break;
6714
6715 case AARCH64_OPND_EXCEPTION:
6716 case AARCH64_OPND_UNDEFINED:
6717 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6718 imm_reg_type));
6719 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6720 /* addr_off_p */ 0,
6721 /* need_libopcodes_p */ 0,
6722 /* skip_p */ 1);
6723 break;
6724
6725 case AARCH64_OPND_NZCV:
6726 {
6727 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6728 if (nzcv != NULL)
6729 {
6730 str += 4;
6731 info->imm.value = nzcv->value;
6732 break;
6733 }
6734 po_imm_or_fail (0, 15);
6735 info->imm.value = val;
6736 }
6737 break;
6738
6739 case AARCH64_OPND_COND:
6740 case AARCH64_OPND_COND1:
6741 {
6742 char *start = str;
6743 do
6744 str++;
6745 while (ISALPHA (*str));
6746 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6747 if (info->cond == NULL)
6748 {
6749 set_syntax_error (_("invalid condition"));
6750 goto failure;
6751 }
6752 else if (operands[i] == AARCH64_OPND_COND1
6753 && (info->cond->value & 0xe) == 0xe)
6754 {
6755 /* Do not allow AL or NV. */
6756 set_default_error ();
6757 goto failure;
6758 }
6759 }
6760 break;
6761
6762 case AARCH64_OPND_ADDR_ADRP:
6763 po_misc_or_fail (parse_adrp (&str));
6764 /* Clear the value as operand needs to be relocated. */
6765 info->imm.value = 0;
6766 break;
6767
6768 case AARCH64_OPND_ADDR_PCREL14:
6769 case AARCH64_OPND_ADDR_PCREL19:
6770 case AARCH64_OPND_ADDR_PCREL21:
6771 case AARCH64_OPND_ADDR_PCREL26:
6772 po_misc_or_fail (parse_address (&str, info));
6773 if (!info->addr.pcrel)
6774 {
6775 set_syntax_error (_("invalid pc-relative address"));
6776 goto failure;
6777 }
6778 if (inst.gen_lit_pool
6779 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6780 {
6781 /* Only permit "=value" in the literal load instructions.
6782 The literal will be generated by programmer_friendly_fixup. */
6783 set_syntax_error (_("invalid use of \"=immediate\""));
6784 goto failure;
6785 }
6786 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6787 {
6788 set_syntax_error (_("unrecognized relocation suffix"));
6789 goto failure;
6790 }
6791 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6792 {
6793 info->imm.value = inst.reloc.exp.X_add_number;
6794 inst.reloc.type = BFD_RELOC_UNUSED;
6795 }
6796 else
6797 {
6798 info->imm.value = 0;
6799 if (inst.reloc.type == BFD_RELOC_UNUSED)
6800 switch (opcode->iclass)
6801 {
6802 case compbranch:
6803 case condbranch:
6804 /* e.g. CBZ or B.COND */
6805 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6806 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6807 break;
6808 case testbranch:
6809 /* e.g. TBZ */
6810 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6811 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6812 break;
6813 case branch_imm:
6814 /* e.g. B or BL */
6815 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6816 inst.reloc.type =
6817 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6818 : BFD_RELOC_AARCH64_JUMP26;
6819 break;
6820 case loadlit:
6821 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6822 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6823 break;
6824 case pcreladdr:
6825 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6826 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6827 break;
6828 default:
6829 gas_assert (0);
6830 abort ();
6831 }
6832 inst.reloc.pc_rel = 1;
6833 }
6834 break;
6835
6836 case AARCH64_OPND_ADDR_SIMPLE:
6837 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6838 {
6839 /* [<Xn|SP>{, #<simm>}] */
6840 char *start = str;
6841 /* First use the normal address-parsing routines, to get
6842 the usual syntax errors. */
6843 po_misc_or_fail (parse_address (&str, info));
6844 if (info->addr.pcrel || info->addr.offset.is_reg
6845 || !info->addr.preind || info->addr.postind
6846 || info->addr.writeback)
6847 {
6848 set_syntax_error (_("invalid addressing mode"));
6849 goto failure;
6850 }
6851
6852 /* Then retry, matching the specific syntax of these addresses. */
6853 str = start;
6854 po_char_or_fail ('[');
6855 po_reg_or_fail (REG_TYPE_R64_SP);
6856 /* Accept optional ", #0". */
6857 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6858 && skip_past_char (&str, ','))
6859 {
6860 skip_past_char (&str, '#');
6861 if (! skip_past_char (&str, '0'))
6862 {
6863 set_fatal_syntax_error
6864 (_("the optional immediate offset can only be 0"));
6865 goto failure;
6866 }
6867 }
6868 po_char_or_fail (']');
6869 break;
6870 }
6871
6872 case AARCH64_OPND_ADDR_REGOFF:
6873 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6874 po_misc_or_fail (parse_address (&str, info));
6875 regoff_addr:
6876 if (info->addr.pcrel || !info->addr.offset.is_reg
6877 || !info->addr.preind || info->addr.postind
6878 || info->addr.writeback)
6879 {
6880 set_syntax_error (_("invalid addressing mode"));
6881 goto failure;
6882 }
6883 if (!info->shifter.operator_present)
6884 {
6885 /* Default to LSL if not present. Libopcodes prefers shifter
6886 kind to be explicit. */
6887 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6888 info->shifter.kind = AARCH64_MOD_LSL;
6889 }
6890 /* Qualifier to be deduced by libopcodes. */
6891 break;
6892
6893 case AARCH64_OPND_ADDR_SIMM7:
6894 po_misc_or_fail (parse_address (&str, info));
6895 if (info->addr.pcrel || info->addr.offset.is_reg
6896 || (!info->addr.preind && !info->addr.postind))
6897 {
6898 set_syntax_error (_("invalid addressing mode"));
6899 goto failure;
6900 }
6901 if (inst.reloc.type != BFD_RELOC_UNUSED)
6902 {
6903 set_syntax_error (_("relocation not allowed"));
6904 goto failure;
6905 }
6906 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6907 /* addr_off_p */ 1,
6908 /* need_libopcodes_p */ 1,
6909 /* skip_p */ 0);
6910 break;
6911
6912 case AARCH64_OPND_ADDR_SIMM9:
6913 case AARCH64_OPND_ADDR_SIMM9_2:
6914 case AARCH64_OPND_ADDR_SIMM11:
6915 case AARCH64_OPND_ADDR_SIMM13:
6916 po_misc_or_fail (parse_address (&str, info));
6917 if (info->addr.pcrel || info->addr.offset.is_reg
6918 || (!info->addr.preind && !info->addr.postind)
6919 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6920 && info->addr.writeback))
6921 {
6922 set_syntax_error (_("invalid addressing mode"));
6923 goto failure;
6924 }
6925 if (inst.reloc.type != BFD_RELOC_UNUSED)
6926 {
6927 set_syntax_error (_("relocation not allowed"));
6928 goto failure;
6929 }
6930 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6931 /* addr_off_p */ 1,
6932 /* need_libopcodes_p */ 1,
6933 /* skip_p */ 0);
6934 break;
6935
6936 case AARCH64_OPND_ADDR_SIMM10:
6937 case AARCH64_OPND_ADDR_OFFSET:
6938 po_misc_or_fail (parse_address (&str, info));
6939 if (info->addr.pcrel || info->addr.offset.is_reg
6940 || !info->addr.preind || info->addr.postind)
6941 {
6942 set_syntax_error (_("invalid addressing mode"));
6943 goto failure;
6944 }
6945 if (inst.reloc.type != BFD_RELOC_UNUSED)
6946 {
6947 set_syntax_error (_("relocation not allowed"));
6948 goto failure;
6949 }
6950 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6951 /* addr_off_p */ 1,
6952 /* need_libopcodes_p */ 1,
6953 /* skip_p */ 0);
6954 break;
6955
6956 case AARCH64_OPND_ADDR_UIMM12:
6957 po_misc_or_fail (parse_address (&str, info));
6958 if (info->addr.pcrel || info->addr.offset.is_reg
6959 || !info->addr.preind || info->addr.writeback)
6960 {
6961 set_syntax_error (_("invalid addressing mode"));
6962 goto failure;
6963 }
6964 if (inst.reloc.type == BFD_RELOC_UNUSED)
6965 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6966 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6967 || (inst.reloc.type
6968 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6969 || (inst.reloc.type
6970 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6971 || (inst.reloc.type
6972 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6973 || (inst.reloc.type
6974 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6975 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6976 /* Leave qualifier to be determined by libopcodes. */
6977 break;
6978
6979 case AARCH64_OPND_SIMD_ADDR_POST:
6980 /* [<Xn|SP>], <Xm|#<amount>> */
6981 po_misc_or_fail (parse_address (&str, info));
6982 if (!info->addr.postind || !info->addr.writeback)
6983 {
6984 set_syntax_error (_("invalid addressing mode"));
6985 goto failure;
6986 }
6987 if (!info->addr.offset.is_reg)
6988 {
6989 if (inst.reloc.exp.X_op == O_constant)
6990 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6991 else
6992 {
6993 set_fatal_syntax_error
6994 (_("writeback value must be an immediate constant"));
6995 goto failure;
6996 }
6997 }
6998 /* No qualifier. */
6999 break;
7000
7001 case AARCH64_OPND_SME_SM_ZA:
7002 /* { SM | ZA } */
7003 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7004 {
7005 set_syntax_error (_("unknown or missing PSTATE field name"));
7006 goto failure;
7007 }
7008 info->reg.regno = val;
7009 break;
7010
7011 case AARCH64_OPND_SME_PnT_Wm_imm:
7012 if (!parse_dual_indexed_reg (&str, REG_TYPE_PN,
7013 &info->indexed_za, &qualifier))
7014 goto failure;
7015 info->qualifier = qualifier;
7016 break;
7017
7018 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7019 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7020 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7021 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7022 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7023 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7024 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7025 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7026 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7027 case AARCH64_OPND_SVE_ADDR_RI_U6:
7028 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7029 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7030 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7031 /* [X<n>{, #imm, MUL VL}]
7032 [X<n>{, #imm}]
7033 but recognizing SVE registers. */
7034 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7035 &offset_qualifier));
7036 if (base_qualifier != AARCH64_OPND_QLF_X)
7037 {
7038 set_syntax_error (_("invalid addressing mode"));
7039 goto failure;
7040 }
7041 sve_regimm:
7042 if (info->addr.pcrel || info->addr.offset.is_reg
7043 || !info->addr.preind || info->addr.writeback)
7044 {
7045 set_syntax_error (_("invalid addressing mode"));
7046 goto failure;
7047 }
7048 if (inst.reloc.type != BFD_RELOC_UNUSED
7049 || inst.reloc.exp.X_op != O_constant)
7050 {
7051 /* Make sure this has priority over
7052 "invalid addressing mode". */
7053 set_fatal_syntax_error (_("constant offset required"));
7054 goto failure;
7055 }
7056 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7057 break;
7058
7059 case AARCH64_OPND_SVE_ADDR_R:
7060 /* [<Xn|SP>{, <R><m>}]
7061 but recognizing SVE registers. */
7062 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7063 &offset_qualifier));
7064 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7065 {
7066 offset_qualifier = AARCH64_OPND_QLF_X;
7067 info->addr.offset.is_reg = 1;
7068 info->addr.offset.regno = 31;
7069 }
7070 else if (base_qualifier != AARCH64_OPND_QLF_X
7071 || offset_qualifier != AARCH64_OPND_QLF_X)
7072 {
7073 set_syntax_error (_("invalid addressing mode"));
7074 goto failure;
7075 }
7076 goto regoff_addr;
7077
7078 case AARCH64_OPND_SVE_ADDR_RR:
7079 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7080 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7081 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7082 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7083 case AARCH64_OPND_SVE_ADDR_RX:
7084 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7085 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7086 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7087 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7088 but recognizing SVE registers. */
7089 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7090 &offset_qualifier));
7091 if (base_qualifier != AARCH64_OPND_QLF_X
7092 || offset_qualifier != AARCH64_OPND_QLF_X)
7093 {
7094 set_syntax_error (_("invalid addressing mode"));
7095 goto failure;
7096 }
7097 goto regoff_addr;
7098
7099 case AARCH64_OPND_SVE_ADDR_RZ:
7100 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7101 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7102 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7103 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7104 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7105 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7106 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7107 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7108 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7109 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7110 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7111 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7112 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7113 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7114 &offset_qualifier));
7115 if (base_qualifier != AARCH64_OPND_QLF_X
7116 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7117 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7118 {
7119 set_syntax_error (_("invalid addressing mode"));
7120 goto failure;
7121 }
7122 info->qualifier = offset_qualifier;
7123 goto regoff_addr;
7124
7125 case AARCH64_OPND_SVE_ADDR_ZX:
7126 /* [Zn.<T>{, <Xm>}]. */
7127 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7128 &offset_qualifier));
7129 /* Things to check:
7130 base_qualifier either S_S or S_D
7131 offset_qualifier must be X
7132 */
7133 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7134 && base_qualifier != AARCH64_OPND_QLF_S_D)
7135 || offset_qualifier != AARCH64_OPND_QLF_X)
7136 {
7137 set_syntax_error (_("invalid addressing mode"));
7138 goto failure;
7139 }
7140 info->qualifier = base_qualifier;
7141 if (!info->addr.offset.is_reg || info->addr.pcrel
7142 || !info->addr.preind || info->addr.writeback
7143 || info->shifter.operator_present != 0)
7144 {
7145 set_syntax_error (_("invalid addressing mode"));
7146 goto failure;
7147 }
7148 info->shifter.kind = AARCH64_MOD_LSL;
7149 break;
7150
7151
7152 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7153 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7154 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7155 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7156 /* [Z<n>.<T>{, #imm}] */
7157 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7158 &offset_qualifier));
7159 if (base_qualifier != AARCH64_OPND_QLF_S_S
7160 && base_qualifier != AARCH64_OPND_QLF_S_D)
7161 {
7162 set_syntax_error (_("invalid addressing mode"));
7163 goto failure;
7164 }
7165 info->qualifier = base_qualifier;
7166 goto sve_regimm;
7167
7168 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7169 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7170 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7171 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7172 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7173
7174 We don't reject:
7175
7176 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7177
7178 here since we get better error messages by leaving it to
7179 the qualifier checking routines. */
7180 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7181 &offset_qualifier));
7182 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7183 && base_qualifier != AARCH64_OPND_QLF_S_D)
7184 || offset_qualifier != base_qualifier)
7185 {
7186 set_syntax_error (_("invalid addressing mode"));
7187 goto failure;
7188 }
7189 info->qualifier = base_qualifier;
7190 goto regoff_addr;
7191
7192 case AARCH64_OPND_SYSREG:
7193 {
7194 uint32_t sysreg_flags;
7195 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7196 &sysreg_flags)) == PARSE_FAIL)
7197 {
7198 set_syntax_error (_("unknown or missing system register name"));
7199 goto failure;
7200 }
7201 inst.base.operands[i].sysreg.value = val;
7202 inst.base.operands[i].sysreg.flags = sysreg_flags;
7203 break;
7204 }
7205
7206 case AARCH64_OPND_PSTATEFIELD:
7207 {
7208 uint32_t sysreg_flags;
7209 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7210 &sysreg_flags)) == PARSE_FAIL)
7211 {
7212 set_syntax_error (_("unknown or missing PSTATE field name"));
7213 goto failure;
7214 }
7215 inst.base.operands[i].pstatefield = val;
7216 inst.base.operands[i].sysreg.flags = sysreg_flags;
7217 break;
7218 }
7219
7220 case AARCH64_OPND_SYSREG_IC:
7221 inst.base.operands[i].sysins_op =
7222 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7223 goto sys_reg_ins;
7224
7225 case AARCH64_OPND_SYSREG_DC:
7226 inst.base.operands[i].sysins_op =
7227 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7228 goto sys_reg_ins;
7229
7230 case AARCH64_OPND_SYSREG_AT:
7231 inst.base.operands[i].sysins_op =
7232 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7233 goto sys_reg_ins;
7234
7235 case AARCH64_OPND_SYSREG_SR:
7236 inst.base.operands[i].sysins_op =
7237 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7238 goto sys_reg_ins;
7239
7240 case AARCH64_OPND_SYSREG_TLBI:
7241 inst.base.operands[i].sysins_op =
7242 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7243 sys_reg_ins:
7244 if (inst.base.operands[i].sysins_op == NULL)
7245 {
7246 set_fatal_syntax_error ( _("unknown or missing operation name"));
7247 goto failure;
7248 }
7249 break;
7250
7251 case AARCH64_OPND_BARRIER:
7252 case AARCH64_OPND_BARRIER_ISB:
7253 val = parse_barrier (&str);
7254 if (val != PARSE_FAIL
7255 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7256 {
7257 /* ISB only accepts options name 'sy'. */
7258 set_syntax_error
7259 (_("the specified option is not accepted in ISB"));
7260 /* Turn off backtrack as this optional operand is present. */
7261 backtrack_pos = 0;
7262 goto failure;
7263 }
7264 if (val != PARSE_FAIL
7265 && operands[i] == AARCH64_OPND_BARRIER)
7266 {
7267 /* Regular barriers accept options CRm (C0-C15).
7268 DSB nXS barrier variant accepts values > 15. */
7269 if (val < 0 || val > 15)
7270 {
7271 set_syntax_error (_("the specified option is not accepted in DSB"));
7272 goto failure;
7273 }
7274 }
7275 /* This is an extension to accept a 0..15 immediate. */
7276 if (val == PARSE_FAIL)
7277 po_imm_or_fail (0, 15);
7278 info->barrier = aarch64_barrier_options + val;
7279 break;
7280
7281 case AARCH64_OPND_BARRIER_DSB_NXS:
7282 val = parse_barrier (&str);
7283 if (val != PARSE_FAIL)
7284 {
7285 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7286 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7287 {
7288 set_syntax_error (_("the specified option is not accepted in DSB"));
7289 /* Turn off backtrack as this optional operand is present. */
7290 backtrack_pos = 0;
7291 goto failure;
7292 }
7293 }
7294 else
7295 {
7296 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7297 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7298 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7299 goto failure;
7300 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7301 {
7302 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7303 goto failure;
7304 }
7305 }
7306 /* Option index is encoded as 2-bit value in val<3:2>. */
7307 val = (val >> 2) - 4;
7308 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7309 break;
7310
7311 case AARCH64_OPND_PRFOP:
7312 val = parse_pldop (&str);
7313 /* This is an extension to accept a 0..31 immediate. */
7314 if (val == PARSE_FAIL)
7315 po_imm_or_fail (0, 31);
7316 inst.base.operands[i].prfop = aarch64_prfops + val;
7317 break;
7318
7319 case AARCH64_OPND_BARRIER_PSB:
7320 val = parse_barrier_psb (&str, &(info->hint_option));
7321 if (val == PARSE_FAIL)
7322 goto failure;
7323 break;
7324
7325 case AARCH64_OPND_BTI_TARGET:
7326 val = parse_bti_operand (&str, &(info->hint_option));
7327 if (val == PARSE_FAIL)
7328 goto failure;
7329 break;
7330
7331 case AARCH64_OPND_SME_ZAda_2b:
7332 case AARCH64_OPND_SME_ZAda_3b:
7333 reg = parse_reg_with_qual (&str, REG_TYPE_ZAT, &qualifier);
7334 if (!reg)
7335 goto failure;
7336 info->reg.regno = reg->number;
7337 info->qualifier = qualifier;
7338 break;
7339
7340 case AARCH64_OPND_SME_ZA_HV_idx_src:
7341 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7342 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7343 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr
7344 ? !parse_sme_za_hv_tiles_operand_with_braces (&str,
7345 &info->indexed_za,
7346 &qualifier)
7347 : !parse_dual_indexed_reg (&str, REG_TYPE_ZATHV,
7348 &info->indexed_za, &qualifier))
7349 goto failure;
7350 info->qualifier = qualifier;
7351 break;
7352
7353 case AARCH64_OPND_SME_list_of_64bit_tiles:
7354 val = parse_sme_list_of_64bit_tiles (&str);
7355 if (val == PARSE_FAIL)
7356 goto failure;
7357 info->imm.value = val;
7358 break;
7359
7360 case AARCH64_OPND_SME_ZA_array:
7361 if (!parse_dual_indexed_reg (&str, REG_TYPE_ZA,
7362 &info->indexed_za, &qualifier))
7363 goto failure;
7364 info->qualifier = qualifier;
7365 break;
7366
7367 case AARCH64_OPND_MOPS_ADDR_Rd:
7368 case AARCH64_OPND_MOPS_ADDR_Rs:
7369 po_char_or_fail ('[');
7370 if (!parse_x0_to_x30 (&str, info))
7371 goto failure;
7372 po_char_or_fail (']');
7373 po_char_or_fail ('!');
7374 break;
7375
7376 case AARCH64_OPND_MOPS_WB_Rn:
7377 if (!parse_x0_to_x30 (&str, info))
7378 goto failure;
7379 po_char_or_fail ('!');
7380 break;
7381
7382 default:
7383 as_fatal (_("unhandled operand code %d"), operands[i]);
7384 }
7385
7386 /* If we get here, this operand was successfully parsed. */
7387 inst.base.operands[i].present = 1;
7388 continue;
7389
7390 failure:
7391 /* The parse routine should already have set the error, but in case
7392 not, set a default one here. */
7393 if (! error_p ())
7394 set_default_error ();
7395
7396 if (! backtrack_pos)
7397 goto parse_operands_return;
7398
7399 {
7400 /* We reach here because this operand is marked as optional, and
7401 either no operand was supplied or the operand was supplied but it
7402 was syntactically incorrect. In the latter case we report an
7403 error. In the former case we perform a few more checks before
7404 dropping through to the code to insert the default operand. */
7405
7406 char *tmp = backtrack_pos;
7407 char endchar = END_OF_INSN;
7408
7409 if (i != (aarch64_num_of_operands (opcode) - 1))
7410 endchar = ',';
7411 skip_past_char (&tmp, ',');
7412
7413 if (*tmp != endchar)
7414 /* The user has supplied an operand in the wrong format. */
7415 goto parse_operands_return;
7416
7417 /* Make sure there is not a comma before the optional operand.
7418 For example the fifth operand of 'sys' is optional:
7419
7420 sys #0,c0,c0,#0, <--- wrong
7421 sys #0,c0,c0,#0 <--- correct. */
7422 if (comma_skipped_p && i && endchar == END_OF_INSN)
7423 {
7424 set_fatal_syntax_error
7425 (_("unexpected comma before the omitted optional operand"));
7426 goto parse_operands_return;
7427 }
7428 }
7429
7430 /* Reaching here means we are dealing with an optional operand that is
7431 omitted from the assembly line. */
7432 gas_assert (optional_operand_p (opcode, i));
7433 info->present = 0;
7434 process_omitted_operand (operands[i], opcode, i, info);
7435
7436 /* Try again, skipping the optional operand at backtrack_pos. */
7437 str = backtrack_pos;
7438 backtrack_pos = 0;
7439
7440 /* Clear any error record after the omitted optional operand has been
7441 successfully handled. */
7442 clear_error ();
7443 }
7444
7445 /* Check if we have parsed all the operands. */
7446 if (*str != '\0' && ! error_p ())
7447 {
7448 /* Set I to the index of the last present operand; this is
7449 for the purpose of diagnostics. */
7450 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7451 ;
7452 set_fatal_syntax_error
7453 (_("unexpected characters following instruction"));
7454 }
7455
7456 parse_operands_return:
7457
7458 if (error_p ())
7459 {
7460 inst.parsing_error.index = i;
7461 DEBUG_TRACE ("parsing FAIL: %s - %s",
7462 operand_mismatch_kind_names[inst.parsing_error.kind],
7463 inst.parsing_error.error);
7464 /* Record the operand error properly; this is useful when there
7465 are multiple instruction templates for a mnemonic name, so that
7466 later on, we can select the error that most closely describes
7467 the problem. */
7468 record_operand_error_info (opcode, &inst.parsing_error);
7469 return false;
7470 }
7471 else
7472 {
7473 DEBUG_TRACE ("parsing SUCCESS");
7474 return true;
7475 }
7476 }
7477
7478 /* It does some fix-up to provide some programmer friendly feature while
7479 keeping the libopcodes happy, i.e. libopcodes only accepts
7480 the preferred architectural syntax.
7481 Return FALSE if there is any failure; otherwise return TRUE. */
7482
7483 static bool
7484 programmer_friendly_fixup (aarch64_instruction *instr)
7485 {
7486 aarch64_inst *base = &instr->base;
7487 const aarch64_opcode *opcode = base->opcode;
7488 enum aarch64_op op = opcode->op;
7489 aarch64_opnd_info *operands = base->operands;
7490
7491 DEBUG_TRACE ("enter");
7492
7493 switch (opcode->iclass)
7494 {
7495 case testbranch:
7496 /* TBNZ Xn|Wn, #uimm6, label
7497 Test and Branch Not Zero: conditionally jumps to label if bit number
7498 uimm6 in register Xn is not zero. The bit number implies the width of
7499 the register, which may be written and should be disassembled as Wn if
7500 uimm is less than 32. */
7501 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
7502 {
7503 if (operands[1].imm.value >= 32)
7504 {
7505 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
7506 0, 31);
7507 return false;
7508 }
7509 operands[0].qualifier = AARCH64_OPND_QLF_X;
7510 }
7511 break;
7512 case loadlit:
7513 /* LDR Wt, label | =value
7514 As a convenience assemblers will typically permit the notation
7515 "=value" in conjunction with the pc-relative literal load instructions
7516 to automatically place an immediate value or symbolic address in a
7517 nearby literal pool and generate a hidden label which references it.
7518 ISREG has been set to 0 in the case of =value. */
7519 if (instr->gen_lit_pool
7520 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
7521 {
7522 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
7523 if (op == OP_LDRSW_LIT)
7524 size = 4;
7525 if (instr->reloc.exp.X_op != O_constant
7526 && instr->reloc.exp.X_op != O_big
7527 && instr->reloc.exp.X_op != O_symbol)
7528 {
7529 record_operand_error (opcode, 1,
7530 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
7531 _("constant expression expected"));
7532 return false;
7533 }
7534 if (! add_to_lit_pool (&instr->reloc.exp, size))
7535 {
7536 record_operand_error (opcode, 1,
7537 AARCH64_OPDE_OTHER_ERROR,
7538 _("literal pool insertion failed"));
7539 return false;
7540 }
7541 }
7542 break;
7543 case log_shift:
7544 case bitfield:
7545 /* UXT[BHW] Wd, Wn
7546 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
7547 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
7548 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
7549 A programmer-friendly assembler should accept a destination Xd in
7550 place of Wd, however that is not the preferred form for disassembly.
7551 */
7552 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
7553 && operands[1].qualifier == AARCH64_OPND_QLF_W
7554 && operands[0].qualifier == AARCH64_OPND_QLF_X)
7555 operands[0].qualifier = AARCH64_OPND_QLF_W;
7556 break;
7557
7558 case addsub_ext:
7559 {
7560 /* In the 64-bit form, the final register operand is written as Wm
7561 for all but the (possibly omitted) UXTX/LSL and SXTX
7562 operators.
7563 As a programmer-friendly assembler, we accept e.g.
7564 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
7565 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
7566 int idx = aarch64_operand_index (opcode->operands,
7567 AARCH64_OPND_Rm_EXT);
7568 gas_assert (idx == 1 || idx == 2);
7569 if (operands[0].qualifier == AARCH64_OPND_QLF_X
7570 && operands[idx].qualifier == AARCH64_OPND_QLF_X
7571 && operands[idx].shifter.kind != AARCH64_MOD_LSL
7572 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
7573 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
7574 operands[idx].qualifier = AARCH64_OPND_QLF_W;
7575 }
7576 break;
7577
7578 default:
7579 break;
7580 }
7581
7582 DEBUG_TRACE ("exit with SUCCESS");
7583 return true;
7584 }
7585
7586 /* Check for loads and stores that will cause unpredictable behavior. */
7587
7588 static void
7589 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
7590 {
7591 aarch64_inst *base = &instr->base;
7592 const aarch64_opcode *opcode = base->opcode;
7593 const aarch64_opnd_info *opnds = base->operands;
7594 switch (opcode->iclass)
7595 {
7596 case ldst_pos:
7597 case ldst_imm9:
7598 case ldst_imm10:
7599 case ldst_unscaled:
7600 case ldst_unpriv:
7601 /* Loading/storing the base register is unpredictable if writeback. */
7602 if ((aarch64_get_operand_class (opnds[0].type)
7603 == AARCH64_OPND_CLASS_INT_REG)
7604 && opnds[0].reg.regno == opnds[1].addr.base_regno
7605 && opnds[1].addr.base_regno != REG_SP
7606 /* Exempt STG/STZG/ST2G/STZ2G. */
7607 && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
7608 && opnds[1].addr.writeback)
7609 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
7610 break;
7611
7612 case ldstpair_off:
7613 case ldstnapair_offs:
7614 case ldstpair_indexed:
7615 /* Loading/storing the base register is unpredictable if writeback. */
7616 if ((aarch64_get_operand_class (opnds[0].type)
7617 == AARCH64_OPND_CLASS_INT_REG)
7618 && (opnds[0].reg.regno == opnds[2].addr.base_regno
7619 || opnds[1].reg.regno == opnds[2].addr.base_regno)
7620 && opnds[2].addr.base_regno != REG_SP
7621 /* Exempt STGP. */
7622 && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
7623 && opnds[2].addr.writeback)
7624 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
7625 /* Load operations must load different registers. */
7626 if ((opcode->opcode & (1 << 22))
7627 && opnds[0].reg.regno == opnds[1].reg.regno)
7628 as_warn (_("unpredictable load of register pair -- `%s'"), str);
7629 break;
7630
7631 case ldstexcl:
7632 if ((aarch64_get_operand_class (opnds[0].type)
7633 == AARCH64_OPND_CLASS_INT_REG)
7634 && (aarch64_get_operand_class (opnds[1].type)
7635 == AARCH64_OPND_CLASS_INT_REG))
7636 {
7637 if ((opcode->opcode & (1 << 22)))
7638 {
7639 /* It is unpredictable if load-exclusive pair with Rt == Rt2. */
7640 if ((opcode->opcode & (1 << 21))
7641 && opnds[0].reg.regno == opnds[1].reg.regno)
7642 as_warn (_("unpredictable load of register pair -- `%s'"), str);
7643 }
7644 else
7645 {
7646 /* Store-Exclusive is unpredictable if Rt == Rs. */
7647 if (opnds[0].reg.regno == opnds[1].reg.regno)
7648 as_warn
7649 (_("unpredictable: identical transfer and status registers"
7650 " --`%s'"),str);
7651
7652 if (opnds[0].reg.regno == opnds[2].reg.regno)
7653 {
7654 if (!(opcode->opcode & (1 << 21)))
7655 /* Store-Exclusive is unpredictable if Rn == Rs. */
7656 as_warn
7657 (_("unpredictable: identical base and status registers"
7658 " --`%s'"),str);
7659 else
7660 /* Store-Exclusive pair is unpredictable if Rt2 == Rs. */
7661 as_warn
7662 (_("unpredictable: "
7663 "identical transfer and status registers"
7664 " --`%s'"),str);
7665 }
7666
7667 /* Store-Exclusive pair is unpredictable if Rn == Rs. */
7668 if ((opcode->opcode & (1 << 21))
7669 && opnds[0].reg.regno == opnds[3].reg.regno
7670 && opnds[3].reg.regno != REG_SP)
7671 as_warn (_("unpredictable: identical base and status registers"
7672 " --`%s'"),str);
7673 }
7674 }
7675 break;
7676
7677 default:
7678 break;
7679 }
7680 }
7681
7682 static void
7683 force_automatic_sequence_close (void)
7684 {
7685 struct aarch64_segment_info_type *tc_seg_info;
7686
7687 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7688 if (tc_seg_info->insn_sequence.instr)
7689 {
7690 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7691 _("previous `%s' sequence has not been closed"),
7692 tc_seg_info->insn_sequence.instr->opcode->name);
7693 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7694 }
7695 }
7696
7697 /* A wrapper function to interface with libopcodes on encoding and
7698 record the error message if there is any.
7699
7700 Return TRUE on success; otherwise return FALSE. */
7701
7702 static bool
7703 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7704 aarch64_insn *code)
7705 {
7706 aarch64_operand_error error_info;
7707 memset (&error_info, '\0', sizeof (error_info));
7708 error_info.kind = AARCH64_OPDE_NIL;
7709 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7710 && !error_info.non_fatal)
7711 return true;
7712
7713 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7714 record_operand_error_info (opcode, &error_info);
7715 return error_info.non_fatal;
7716 }
7717
7718 #ifdef DEBUG_AARCH64
7719 static inline void
7720 dump_opcode_operands (const aarch64_opcode *opcode)
7721 {
7722 int i = 0;
7723 while (opcode->operands[i] != AARCH64_OPND_NIL)
7724 {
7725 aarch64_verbose ("\t\t opnd%d: %s", i,
7726 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7727 ? aarch64_get_operand_name (opcode->operands[i])
7728 : aarch64_get_operand_desc (opcode->operands[i]));
7729 ++i;
7730 }
7731 }
7732 #endif /* DEBUG_AARCH64 */
7733
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  Remember the first '.' so that a
     condition suffix (e.g. b.eq) can be split off later.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* A statement with no '.' may be a register alias definition
     (".req"-style) rather than an instruction.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed out of the mnemonic across the
     per-instruction state reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, apply programmer-friendly transformations, then encode;
	 the first template that survives all three wins.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; reset state before trying the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
7889
/* Various frobbings of labels and their addresses.  */

void
aarch64_start_line_hook (void)
{
  /* A new statement is starting: forget the label seen on the previous
     line so md_assemble does not re-anchor it.  */
  last_label_seen = NULL;
}
7897
/* Record SYM as the most recently seen label (md_assemble may re-anchor
   it to the aligned instruction address) and emit DWARF info for it.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
7905
/* Hook run when a section is finished.  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
7912
7913 int
7914 aarch64_data_in_code (void)
7915 {
7916 if (startswith (input_line_pointer + 1, "data:"))
7917 {
7918 *input_line_pointer = '/';
7919 input_line_pointer += 5;
7920 *input_line_pointer = 0;
7921 return 1;
7922 }
7923
7924 return 0;
7925 }
7926
/* Strip a trailing "/data" marker from NAME in place; a bare "/data"
   (length 5) is left alone.  Returns NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
7937 \f
7938 /* Table of all register names defined by default. The user can
7939 define additional names with .req. Note that all register names
7940 should appear in both upper and lowercase variants. Some registers
7941 also have mixed-case names. */
7942
7943 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
7944 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
7945 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
7946 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
7947 #define REGSET16(p,t) \
7948 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
7949 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
7950 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
7951 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
7952 #define REGSET16S(p,s,t) \
7953 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
7954 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
7955 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
7956 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
7957 #define REGSET31(p,t) \
7958 REGSET16(p, t), \
7959 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
7960 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
7961 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
7962 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
7963 #define REGSET(p,t) \
7964 REGSET31(p,t), REGNUM(p,31,t)
7965
7966 /* These go into aarch64_reg_hsh hash-table. */
7967 static const reg_entry reg_names[] = {
7968 /* Integer registers. */
7969 REGSET31 (x, R_64), REGSET31 (X, R_64),
7970 REGSET31 (w, R_32), REGSET31 (W, R_32),
7971
7972 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
7973 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
7974 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
7975 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
7976 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
7977 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
7978
7979 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
7980 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
7981
7982 /* Floating-point single precision registers. */
7983 REGSET (s, FP_S), REGSET (S, FP_S),
7984
7985 /* Floating-point double precision registers. */
7986 REGSET (d, FP_D), REGSET (D, FP_D),
7987
7988 /* Floating-point half precision registers. */
7989 REGSET (h, FP_H), REGSET (H, FP_H),
7990
7991 /* Floating-point byte precision registers. */
7992 REGSET (b, FP_B), REGSET (B, FP_B),
7993
7994 /* Floating-point quad precision registers. */
7995 REGSET (q, FP_Q), REGSET (Q, FP_Q),
7996
7997 /* FP/SIMD registers. */
7998 REGSET (v, VN), REGSET (V, VN),
7999
8000 /* SVE vector registers. */
8001 REGSET (z, ZN), REGSET (Z, ZN),
8002
8003 /* SVE predicate registers. */
8004 REGSET16 (p, PN), REGSET16 (P, PN),
8005
8006 /* SME ZA. We model this as a register because it acts syntactically
8007 like ZA0H, supporting qualifier suffixes and indexing. */
8008 REGDEF (za, 0, ZA), REGDEF (ZA, 0, ZA),
8009
8010 /* SME ZA tile registers. */
8011 REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),
8012
8013 /* SME ZA tile registers (horizontal slice). */
8014 REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),
8015
8016 /* SME ZA tile registers (vertical slice). */
8017 REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV)
8018 };
8019
8020 #undef REGDEF
8021 #undef REGDEF_ALIAS
8022 #undef REGNUM
8023 #undef REGSET16
8024 #undef REGSET31
8025 #undef REGSET
8026
/* The NZCV operand of CCMP-style instructions: a lowercase letter means
   the corresponding flag bit is 0, an uppercase letter means 1.  B packs
   the four flag bits into the 4-bit immediate (N is the MSB).  All 16
   combinations are listed.  The single-letter macros are undefined again
   immediately after the table.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8064 \f
8065 /* MD interface: bits in the object file. */
8066
8067 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8068 for use in the a.out file, and stores them in the array pointed to by buf.
8069 This knows about the endian-ness of the target machine and does
8070 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8071 2 (short) and 4 (long) Floating numbers are put out as a series of
8072 LITTLENUMS (shorts, here at least). */
8073
8074 void
8075 md_number_to_chars (char *buf, valueT val, int n)
8076 {
8077 if (target_big_endian)
8078 number_to_chars_bigendian (buf, val, n);
8079 else
8080 number_to_chars_littleendian (buf, val, n);
8081 }
8082
8083 /* MD interface: Sections. */
8084
8085 /* Estimate the size of a frag before relaxing. Assume everything fits in
8086 4 bytes. */
8087
8088 int
8089 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
8090 {
8091 fragp->fr_var = 4;
8092 return 4;
8093 }
8094
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No extra rounding is applied for AArch64.  */
  return size;
}
8102
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed, and where it starts within the frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach a 4-byte instruction boundary; these cannot
     hold an instruction, so they are zero-filled.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating part of the frag is one NOP instruction.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8160
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets.  */
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state ($d data / $x insn) based on frag type.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
8206
8207 /* Whether SFrame stack trace info is supported. */
8208
8209 bool
8210 aarch64_support_sframe_p (void)
8211 {
8212 /* At this time, SFrame is supported for aarch64 only. */
8213 return (aarch64_abi == AARCH64_ABI_LP64);
8214 }
8215
/* Specify if RA tracking is needed.  */

bool
aarch64_sframe_ra_tracking_p (void)
{
  /* Return-address tracking is always enabled for aarch64 SFrame.  */
  return true;
}
8223
/* Specify the fixed offset to recover RA from CFA.
   (useful only when RA tracking is not needed).  */

offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  /* No fixed offset: RA tracking is used instead on aarch64.  */
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8232
8233 /* Get the abi/arch indentifier for SFrame. */
8234
8235 unsigned char
8236 aarch64_sframe_get_abi_arch (void)
8237 {
8238 unsigned char sframe_abi_arch = 0;
8239
8240 if (aarch64_support_sframe_p ())
8241 {
8242 sframe_abi_arch = target_big_endian
8243 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8244 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8245 }
8246
8247 return sframe_abi_arch;
8248 }
8249
8250 #endif /* OBJ_ELF */
8251 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* The initial CFA is SP with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8259
8260 /* Convert REGNAME to a DWARF-2 register number. */
8261
8262 int
8263 tc_aarch64_regname_to_dw2regnum (char *regname)
8264 {
8265 const reg_entry *reg = parse_reg (&regname);
8266 if (reg == NULL)
8267 return -1;
8268
8269 switch (reg->type)
8270 {
8271 case REG_TYPE_SP_32:
8272 case REG_TYPE_SP_64:
8273 case REG_TYPE_R_32:
8274 case REG_TYPE_R_64:
8275 return reg->number;
8276
8277 case REG_TYPE_FP_B:
8278 case REG_TYPE_FP_H:
8279 case REG_TYPE_FP_S:
8280 case REG_TYPE_FP_D:
8281 case REG_TYPE_FP_Q:
8282 return reg->number + 64;
8283
8284 default:
8285 break;
8286 }
8287 return -1;
8288 }
8289
/* Implement DWARF2_ADDR_SIZE.  */

int
aarch64_dwarf2_addr_size (void)
{
  /* ILP32 uses 4-byte addresses, LLP64 8-byte; otherwise derive the
     size from the output BFD's address width.  */
  if (ilp32_p)
    return 4;
  else if (llp64_p)
    return 8;
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
8301
8302 /* MD interface: Symbol and relocation handling. */
8303
8304 /* Return the address within the segment that a PC-relative fixup is
8305 relative to. For AArch64 PC-relative fixups applied to instructions
8306 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8307
8308 long
8309 md_pcrel_from_section (fixS * fixP, segT seg)
8310 {
8311 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8312
8313 /* If this is pc-relative and we are going to emit a relocation
8314 then we just want to put out any pipeline compensation that the linker
8315 will need. Otherwise we want to use the calculated base. */
8316 if (fixP->fx_pcrel
8317 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8318 || aarch64_force_relocation (fixP)))
8319 base = 0;
8320
8321 /* AArch64 should be consistent for all pc-relative relocations. */
8322 return base + AARCH64_PCREL_OFFSET;
8323 }
8324
8325 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
8326 Otherwise we have no need to default values of symbols. */
8327
8328 symbolS *
8329 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
8330 {
8331 #ifdef OBJ_ELF
8332 if (name[0] == '_' && name[1] == 'G'
8333 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
8334 {
8335 if (!GOT_symbol)
8336 {
8337 if (symbol_find (name))
8338 as_bad (_("GOT already in the symbol table"));
8339
8340 GOT_symbol = symbol_new (name, undefined_section,
8341 &zero_address_frag, 0);
8342 }
8343
8344 return GOT_symbol;
8345 }
8346 #endif
8347
8348 return 0;
8349 }
8350
8351 /* Return non-zero if the indicated VALUE has overflowed the maximum
8352 range expressible by a unsigned number with the indicated number of
8353 BITS. */
8354
8355 static bool
8356 unsigned_overflow (valueT value, unsigned bits)
8357 {
8358 valueT lim;
8359 if (bits >= sizeof (valueT) * 8)
8360 return false;
8361 lim = (valueT) 1 << bits;
8362 return (value >= lim);
8363 }
8364
8365
8366 /* Return non-zero if the indicated VALUE has overflowed the maximum
8367 range expressible by an signed number with the indicated number of
8368 BITS. */
8369
8370 static bool
8371 signed_overflow (offsetT value, unsigned bits)
8372 {
8373 offsetT lim;
8374 if (bits >= sizeof (offsetT) * 8)
8375 return false;
8376 lim = (offsetT) 1 << (bits - 1);
8377 return (value < -lim || value >= lim);
8378 }
8379
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled (positive-offset) opcode to its unscaled
     counterpart; OP_NIL marks opcodes with no such counterpart.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  /* The address is always operand 1 for these load/store forms.  */
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8442
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN is the wide-negated form).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
8503
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  /* Each case patches the already-emitted instruction word in BUF with
     the now-resolved VALUE, or re-encodes via libopcodes.  */
  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
	 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
	 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
	 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12 Rn Rd ADD
	 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
	 51000000 sf 101|0001|shift imm12 Rn Rd SUB
	 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode through libopcodes, which
	 validates whether VALUE is expressible as a bitmask.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      /* Scaled encoding failed; as a fallback, try the unscaled
	 LDUR/STUR form for negative or unaligned offsets.  */
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8682
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* The bytes being patched, inside the frag's literal buffer.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* NOTE(review): for BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP below,
     fx_addnumber appears to carry the operand code that fix_insn
     switches on -- confirm against where that fixup is created.  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    /* Plain data relocations: patch the bytes in place when the fixup is
       fully resolved, or when the target uses REL (addend stored in the
       section contents rather than the relocation).  */
    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* LDR (literal): 19-bit signed immediate, scaled by 4.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* ADR: 21-bit signed byte offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* B.cond: 19-bit signed, word-scaled offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* TBZ/TBNZ: 14-bit signed, word-scaled offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* B/BL: 26-bit signed, word-scaled offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVZ/MOVN/MOVK 16-bit groups: SCALE selects which 16-bit slice of
       the value this relocation extracts.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction.  */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Narrow the generic reloc to the 32- or 64-bit variant per ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Narrow the generic reloc to the 32- or 64-bit variant per ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* All remaining TLS relocations: mark the symbol thread-local and
       leave the relocation for the linker.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* GOT- and page-relative relocations: always resolved by the linker.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    case BFD_RELOC_RVA:
    case BFD_RELOC_32_SECREL:
    case BFD_RELOC_16_SECIDX:
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated struct aarch64_inst.
     N.B. currently only a very limited number of fix-up types actually use
     this field, so the impact on performance should be minimal.  */
  free (fixP->tc_fix_data.inst);

  return;
}
9075
9076 /* Translate internal representation of relocation info to BFD target
9077 format. */
9078
9079 arelent *
9080 tc_gen_reloc (asection * section, fixS * fixp)
9081 {
9082 arelent *reloc;
9083 bfd_reloc_code_real_type code;
9084
9085 reloc = XNEW (arelent);
9086
9087 reloc->sym_ptr_ptr = XNEW (asymbol *);
9088 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9089 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9090
9091 if (fixp->fx_pcrel)
9092 {
9093 if (section->use_rela_p)
9094 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9095 else
9096 fixp->fx_offset = reloc->address;
9097 }
9098 reloc->addend = fixp->fx_offset;
9099
9100 code = fixp->fx_r_type;
9101 switch (code)
9102 {
9103 case BFD_RELOC_16:
9104 if (fixp->fx_pcrel)
9105 code = BFD_RELOC_16_PCREL;
9106 break;
9107
9108 case BFD_RELOC_32:
9109 if (fixp->fx_pcrel)
9110 code = BFD_RELOC_32_PCREL;
9111 break;
9112
9113 case BFD_RELOC_64:
9114 if (fixp->fx_pcrel)
9115 code = BFD_RELOC_64_PCREL;
9116 break;
9117
9118 default:
9119 break;
9120 }
9121
9122 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9123 if (reloc->howto == NULL)
9124 {
9125 as_bad_where (fixp->fx_file, fixp->fx_line,
9126 _
9127 ("cannot represent %s relocation in this object file format"),
9128 bfd_get_reloc_code_name (code));
9129 return NULL;
9130 }
9131
9132 return reloc;
9133 }
9134
9135 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9136
9137 void
9138 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9139 {
9140 bfd_reloc_code_real_type type;
9141 int pcrel = 0;
9142
9143 #ifdef TE_PE
9144 if (exp->X_op == O_secrel)
9145 {
9146 exp->X_op = O_symbol;
9147 type = BFD_RELOC_32_SECREL;
9148 }
9149 else if (exp->X_op == O_secidx)
9150 {
9151 exp->X_op = O_symbol;
9152 type = BFD_RELOC_16_SECIDX;
9153 }
9154 else
9155 {
9156 #endif
9157 /* Pick a reloc.
9158 FIXME: @@ Should look at CPU word size. */
9159 switch (size)
9160 {
9161 case 1:
9162 type = BFD_RELOC_8;
9163 break;
9164 case 2:
9165 type = BFD_RELOC_16;
9166 break;
9167 case 4:
9168 type = BFD_RELOC_32;
9169 break;
9170 case 8:
9171 type = BFD_RELOC_64;
9172 break;
9173 default:
9174 as_bad (_("cannot do %u-byte relocation"), size);
9175 type = BFD_RELOC_UNUSED;
9176 break;
9177 }
9178 #ifdef TE_PE
9179 }
9180 #endif
9181
9182 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9183 }
9184
9185 /* Implement md_after_parse_args. This is the earliest time we need to decide
9186 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9187
9188 void
9189 aarch64_after_parse_args (void)
9190 {
9191 if (aarch64_abi != AARCH64_ABI_NONE)
9192 return;
9193
9194 #ifdef OBJ_ELF
9195 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9196 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9197 aarch64_abi = AARCH64_ABI_ILP32;
9198 else
9199 aarch64_abi = AARCH64_ABI_LP64;
9200 #else
9201 aarch64_abi = AARCH64_ABI_LLP64;
9202 #endif
9203 }
9204
9205 #ifdef OBJ_ELF
9206 const char *
9207 elf64_aarch64_target_format (void)
9208 {
9209 #ifdef TE_CLOUDABI
9210 /* FIXME: What to do for ilp32_p ? */
9211 if (target_big_endian)
9212 return "elf64-bigaarch64-cloudabi";
9213 else
9214 return "elf64-littleaarch64-cloudabi";
9215 #else
9216 if (target_big_endian)
9217 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9218 else
9219 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9220 #endif
9221 }
9222
/* Per-symbol hook run while writing out the symbol table; simply
   delegates to the generic ELF per-symbol finalisation.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9228 #elif defined OBJ_COFF
/* Return the BFD target name used for PE/COFF AArch64 output.  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9234 #endif
9235
9236 /* MD interface: Finalization. */
9237
9238 /* A good place to do this, although this was probably not intended
9239 for this kind of use. We need to dump the literal pool before
9240 references are made to a null symbol pointer. */
9241
9242 void
9243 aarch64_cleanup (void)
9244 {
9245 literal_pool *pool;
9246
9247 for (pool = list_of_pools; pool; pool = pool->next)
9248 {
9249 /* Put it at the end of the relevant section. */
9250 subseg_set (pool->section, pool->sub_section);
9251 s_ltorg (0);
9252 }
9253 }
9254
9255 #ifdef OBJ_ELF
9256 /* Remove any excess mapping symbols generated for alignment frags in
9257 SEC. We may have created a mapping symbol before a zero byte
9258 alignment; remove it if there's a mapping symbol after the
9259 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections GAS did not populate.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag in the section, looking at the last mapping symbol
     recorded for each.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* The mapping symbol sits exactly at the frag boundary; decide
	 whether a later symbol makes it redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9319 #endif
9320
/* Adjust the symbol table.  On ELF targets, prune redundant mapping
   symbols and then run the generic ELF adjustments; a no-op elsewhere.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9333
/* Insert VALUE into TABLE under KEY.  The trailing 0 is
   str_hash_insert's "replace" argument, so an existing entry for KEY
   is presumably left in place rather than overwritten — confirm
   against str_hash_insert's definition.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9339
/* Like checked_hash_insert, but for system-register tables: first
   assert that KEY fits within the maximum system-register name
   length.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9346
9347 static void
9348 fill_instruction_hash_table (void)
9349 {
9350 const aarch64_opcode *opcode = aarch64_opcode_table;
9351
9352 while (opcode->name != NULL)
9353 {
9354 templates *templ, *new_templ;
9355 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9356
9357 new_templ = XNEW (templates);
9358 new_templ->opcode = opcode;
9359 new_templ->next = NULL;
9360
9361 if (!templ)
9362 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9363 else
9364 {
9365 new_templ->next = templ->next;
9366 templ->next = new_templ;
9367 }
9368 ++opcode;
9369 }
9370 }
9371
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t n = 0;

  /* Copy at most NUM characters, upper-casing as we go; DST is always
     NUL-terminated, so it must have room for NUM + 1 bytes.  */
  while (n < num && src[n] != '\0')
    {
      dst[n] = TOUPPER (src[n]);
      n++;
    }
  dst[n] = '\0';
}
9380
9381 /* Assume STR point to a lower-case string, allocate, convert and return
9382 the corresponding upper-case string. */
9383 static inline const char*
9384 get_upper_str (const char *str)
9385 {
9386 char *ret;
9387 size_t len = strlen (str);
9388 ret = XNEWVEC (char, len + 1);
9389 convert_to_upper (ret, str, len);
9390 return ret;
9391 }
9392
9393 /* MD interface: Initialization. */
9394
/* Build every keyword hash table, apply the command-line CPU/arch
   selection and record the target machine with BFD.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* One hash table per namespace of assembler keywords.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers, PSTATE fields and the IC/DC/AT/TLBI/SR operation
     name spaces; these tables are NULL-name terminated.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* Register names and NZCV flag-set names.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend modifiers, hashed in both lower and upper case.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options:
     -mcpu wins, then -march, then the configured default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if (ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#ifdef OBJ_ELF
  /* SFrame DWARF register numbers for SP/FP/RA.
     FIXME - is there a better way to do it ?  */
  aarch64_sframe_cfa_sp_reg = 31;
  aarch64_sframe_cfa_fp_reg = 29; /* x29.  */
  aarch64_sframe_cfa_ra_reg = 30;
#endif
}
9564
9565 /* Command line processing. */
9566
/* Short options: only -m<...> is handled by this back end.  */
const char *md_shortopts = "m:";

/* Decide which of the -EB/-EL long options exist for this configuration:
   both for a bi-endian target, otherwise only the one matching the
   configured default byte order.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9591
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Simple on/off command-line flags: when OPTION is seen, VALUE is
   stored into *VAR.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9614
/* Describes one -mcpu= choice: the matched name, its feature set, and
   an optional pretty-printed name.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
9623
9624 /* This list should, at a minimum, contain all the cpu names
9625 recognized by GCC. */
/* Known CPUs: maps each -mcpu=/.cpu name to the architecture plus
   extension features it implies, and a canonical display name (NULL
   when there is no vendor name to print).  Terminated by a NULL name.
   The "all" entry must stay first: s_aarch64_cpu skips it.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9783
/* One entry mapping an architecture name to the feature set it
   enables.  */
struct aarch64_arch_option_table
{
  const char *name;			/* Name accepted by -march=/.arch.  */
  const aarch64_feature_set value;	/* Features implied by NAME.  */
};
9789
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Terminated by a NULL name; the "all" entry must
   stay first, since s_aarch64_arch skips it.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
9810
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;			/* Extension name, as written after '+'.  */
  const aarch64_feature_set value;	/* Feature bits enabled by NAME.  */
  const aarch64_feature_set require;	/* Feature dependencies.  */
};
9818
/* All recognized "+ext" extension names, the feature bits each one
   enables, and the features it depends on.  The dependency closures
   are computed by aarch64_feature_enable_set/aarch64_feature_disable_set.
   Terminated by a NULL name.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
   AARCH64_ARCH_NONE},
  {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
   AARCH64_ARCH_NONE},
  {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
   AARCH64_ARCH_NONE},
  {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
   AARCH64_ARCH_NONE},
  {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
   AARCH64_ARCH_NONE},
  {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
   AARCH64_ARCH_NONE},
  {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_F16, 0)},
  {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
   AARCH64_ARCH_NONE},
  {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0)},
  {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
   AARCH64_ARCH_NONE},
  {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_F16
		    | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
   AARCH64_ARCH_NONE},
  {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
   AARCH64_ARCH_NONE},
  {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
   AARCH64_ARCH_NONE},
  {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
   AARCH64_ARCH_NONE},
  {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
   AARCH64_ARCH_NONE},
  {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
   AARCH64_ARCH_NONE},
  {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme", AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  /* "sme-f64" selects the same feature bit as "sme-f64f64" below.  */
  {"sme-f64", AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-f64f64", AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  /* "sme-i64" selects the same feature bit as "sme-i16i64" below.  */
  {"sme-i64", AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i16i64", AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"bf16", AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"i8mm", AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"f32mm", AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm", AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64", AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
   AARCH64_ARCH_NONE},
  {"flagm", AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
   AARCH64_ARCH_NONE},
  {"pauth", AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
   AARCH64_ARCH_NONE},
  {"mops", AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
   AARCH64_ARCH_NONE},
  {"hbc", AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
   AARCH64_ARCH_NONE},
  {"cssc", AARCH64_FEATURE (AARCH64_FEATURE_CSSC, 0),
   AARCH64_ARCH_NONE},
  {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
9919
/* A command-line option recognized by a prefix match and handed to a
   dedicated sub-option parser (see aarch64_long_opts).  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
9927
9928 /* Transitive closure of features depending on set. */
9929 static aarch64_feature_set
9930 aarch64_feature_disable_set (aarch64_feature_set set)
9931 {
9932 const struct aarch64_option_cpu_value_table *opt;
9933 aarch64_feature_set prev = 0;
9934
9935 while (prev != set) {
9936 prev = set;
9937 for (opt = aarch64_features; opt->name != NULL; opt++)
9938 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9939 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9940 }
9941 return set;
9942 }
9943
9944 /* Transitive closure of dependencies of set. */
9945 static aarch64_feature_set
9946 aarch64_feature_enable_set (aarch64_feature_set set)
9947 {
9948 const struct aarch64_option_cpu_value_table *opt;
9949 aarch64_feature_set prev = 0;
9950
9951 while (prev != set) {
9952 prev = set;
9953 for (opt = aarch64_features; opt->name != NULL; opt++)
9954 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9955 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9956 }
9957 return set;
9958 }
9959
9960 static int
9961 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9962 bool ext_only)
9963 {
9964 /* We insist on extensions being added before being removed. We achieve
9965 this by using the ADDING_VALUE variable to indicate whether we are
9966 adding an extension (1) or removing it (0) and only allowing it to
9967 change in the order -1 -> 1 -> 0. */
9968 int adding_value = -1;
9969 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9970
9971 /* Copy the feature set, so that we can modify it. */
9972 *ext_set = **opt_p;
9973 *opt_p = ext_set;
9974
9975 while (str != NULL && *str != 0)
9976 {
9977 const struct aarch64_option_cpu_value_table *opt;
9978 const char *ext = NULL;
9979 int optlen;
9980
9981 if (!ext_only)
9982 {
9983 if (*str != '+')
9984 {
9985 as_bad (_("invalid architectural extension"));
9986 return 0;
9987 }
9988
9989 ext = strchr (++str, '+');
9990 }
9991
9992 if (ext != NULL)
9993 optlen = ext - str;
9994 else
9995 optlen = strlen (str);
9996
9997 if (optlen >= 2 && startswith (str, "no"))
9998 {
9999 if (adding_value != 0)
10000 adding_value = 0;
10001 optlen -= 2;
10002 str += 2;
10003 }
10004 else if (optlen > 0)
10005 {
10006 if (adding_value == -1)
10007 adding_value = 1;
10008 else if (adding_value != 1)
10009 {
10010 as_bad (_("must specify extensions to add before specifying "
10011 "those to remove"));
10012 return false;
10013 }
10014 }
10015
10016 if (optlen == 0)
10017 {
10018 as_bad (_("missing architectural extension"));
10019 return 0;
10020 }
10021
10022 gas_assert (adding_value != -1);
10023
10024 for (opt = aarch64_features; opt->name != NULL; opt++)
10025 if (strncmp (opt->name, str, optlen) == 0)
10026 {
10027 aarch64_feature_set set;
10028
10029 /* Add or remove the extension. */
10030 if (adding_value)
10031 {
10032 set = aarch64_feature_enable_set (opt->value);
10033 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10034 }
10035 else
10036 {
10037 set = aarch64_feature_disable_set (opt->value);
10038 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10039 }
10040 break;
10041 }
10042
10043 if (opt->name == NULL)
10044 {
10045 as_bad (_("unknown architectural extension `%s'"), str);
10046 return 0;
10047 }
10048
10049 str = ext;
10050 };
10051
10052 return 1;
10053 }
10054
10055 static int
10056 aarch64_parse_cpu (const char *str)
10057 {
10058 const struct aarch64_cpu_option_table *opt;
10059 const char *ext = strchr (str, '+');
10060 size_t optlen;
10061
10062 if (ext != NULL)
10063 optlen = ext - str;
10064 else
10065 optlen = strlen (str);
10066
10067 if (optlen == 0)
10068 {
10069 as_bad (_("missing cpu name `%s'"), str);
10070 return 0;
10071 }
10072
10073 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10074 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10075 {
10076 mcpu_cpu_opt = &opt->value;
10077 if (ext != NULL)
10078 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10079
10080 return 1;
10081 }
10082
10083 as_bad (_("unknown cpu `%s'"), str);
10084 return 0;
10085 }
10086
10087 static int
10088 aarch64_parse_arch (const char *str)
10089 {
10090 const struct aarch64_arch_option_table *opt;
10091 const char *ext = strchr (str, '+');
10092 size_t optlen;
10093
10094 if (ext != NULL)
10095 optlen = ext - str;
10096 else
10097 optlen = strlen (str);
10098
10099 if (optlen == 0)
10100 {
10101 as_bad (_("missing architecture name `%s'"), str);
10102 return 0;
10103 }
10104
10105 for (opt = aarch64_archs; opt->name != NULL; opt++)
10106 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10107 {
10108 march_cpu_opt = &opt->value;
10109 if (ext != NULL)
10110 return aarch64_parse_features (ext, &march_cpu_opt, false);
10111
10112 return 1;
10113 }
10114
10115 as_bad (_("unknown architecture `%s'\n"), str);
10116 return 0;
10117 }
10118
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name accepted by -mabi=.  */
  enum aarch64_abi_type value;	/* Corresponding internal ABI code.  */
};
10125
/* ABI names accepted by -mabi=: ILP32 and LP64 on ELF targets, LLP64
   on non-ELF targets.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10134
10135 static int
10136 aarch64_parse_abi (const char *str)
10137 {
10138 unsigned int i;
10139
10140 if (str[0] == '\0')
10141 {
10142 as_bad (_("missing abi name `%s'"), str);
10143 return 0;
10144 }
10145
10146 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10147 if (strcmp (str, aarch64_abis[i].name) == 0)
10148 {
10149 aarch64_abi = aarch64_abis[i].value;
10150 return 1;
10151 }
10152
10153 as_bad (_("unknown abi `%s'\n"), str);
10154 return 0;
10155 }
10156
10157 static struct aarch64_long_option_table aarch64_long_opts[] = {
10158 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
10159 aarch64_parse_abi, NULL},
10160 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
10161 aarch64_parse_cpu, NULL},
10162 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
10163 aarch64_parse_arch, NULL},
10164 {NULL, NULL, 0, NULL}
10165 };
10166
/* Handle the target-specific command-line option C with argument ARG
   (which may be NULL).  Return 1 if the option was recognized and
   consumed, 0 to let the generic gas option machinery handle it.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the table of simple flag options: the whole option
	 text (initial char plus any ARG) must match exactly.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then try the "-mfoo=bar"-style options, matched by prefix.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text after '='.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10233
10234 void
10235 md_show_usage (FILE * fp)
10236 {
10237 struct aarch64_option_table *opt;
10238 struct aarch64_long_option_table *lopt;
10239
10240 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10241
10242 for (opt = aarch64_opts; opt->option != NULL; opt++)
10243 if (opt->help != NULL)
10244 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10245
10246 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10247 if (lopt->help != NULL)
10248 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10249
10250 #ifdef OPTION_EB
10251 fprintf (fp, _("\
10252 -EB assemble code for a big-endian cpu\n"));
10253 #endif
10254
10255 #ifdef OPTION_EL
10256 fprintf (fp, _("\
10257 -EL assemble code for a little-endian cpu\n"));
10258 #endif
10259 }
10260
/* Parse a .cpu directive: select the target CPU (and optional "+ext"
   modifiers) for the rest of the assembly, updating mcpu_cpu_opt and
   cpu_variant.  */

static void
s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_cpu_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  name = input_line_pointer;
  input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
  saved_char = *input_line_pointer;
  /* Temporarily NUL-terminate the operand so the string functions
     below stop at the end of the directive.  */
  *input_line_pointer = 0;

  /* The CPU name may be followed by "+ext..." feature modifiers.  */
  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry.  */
  for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
	&& strncmp (name, opt->name, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	if (ext != NULL)
	  /* NOTE(review): on failure this returns with the NUL still
	     patched into the input line (saved_char not restored and
	     the rest of the line not consumed) — confirm whether the
	     error path below should be taken instead.  */
	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
	    return;

	cpu_variant = *mcpu_cpu_opt;

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
10304
10305
10306 /* Parse a .arch directive. */
10307
10308 static void
10309 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10310 {
10311 const struct aarch64_arch_option_table *opt;
10312 char saved_char;
10313 char *name;
10314 char *ext;
10315 size_t optlen;
10316
10317 name = input_line_pointer;
10318 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10319 saved_char = *input_line_pointer;
10320 *input_line_pointer = 0;
10321
10322 ext = strchr (name, '+');
10323
10324 if (ext != NULL)
10325 optlen = ext - name;
10326 else
10327 optlen = strlen (name);
10328
10329 /* Skip the first "all" entry. */
10330 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10331 if (strlen (opt->name) == optlen
10332 && strncmp (name, opt->name, optlen) == 0)
10333 {
10334 mcpu_cpu_opt = &opt->value;
10335 if (ext != NULL)
10336 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10337 return;
10338
10339 cpu_variant = *mcpu_cpu_opt;
10340
10341 *input_line_pointer = saved_char;
10342 demand_empty_rest_of_line ();
10343 return;
10344 }
10345
10346 as_bad (_("unknown architecture `%s'\n"), name);
10347 *input_line_pointer = saved_char;
10348 ignore_rest_of_line ();
10349 }
10350
/* Parse a .arch_extension directive: apply a single "ext"/"noext"
   feature modifier (written without a leading '+') to the current
   feature set, updating mcpu_cpu_opt and cpu_variant.  */

static void
s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
{
  char saved_char;
  char *ext = input_line_pointer;

  input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
  saved_char = *input_line_pointer;
  /* Temporarily NUL-terminate the operand for the feature parser.  */
  *input_line_pointer = 0;

  /* ext_only=true: the extension name appears without a leading '+'.  */
  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
    /* NOTE(review): returns with saved_char not restored and the rest
       of the line not consumed — confirm this is intended on the
       error path.  */
    return;

  cpu_variant = *mcpu_cpu_opt;

  *input_line_pointer = saved_char;
  demand_empty_rest_of_line ();
}
10371
/* Copy symbol information.  */

/* Copy the AArch64-specific symbol flag word from SRC to DEST.  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10379
10380 #ifdef OBJ_ELF
10381 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10382 This is needed so AArch64 specific st_other values can be independently
10383 specified for an IFUNC resolver (that is called by the dynamic linker)
10384 and the symbol it resolves (aliased to the resolver). In particular,
10385 if a function symbol has special st_other value set via directives,
10386 then attaching an IFUNC resolver to that symbol should not override
10387 the st_other setting. Requiring the directive on the IFUNC resolver
10388 symbol would be unexpected and problematic in C code, where the two
10389 symbols appear as two independent function declarations. */
10390
/* Copy ELF symbol attributes from SRC to DEST, per the rationale in
   the comment above (st_other is deliberately NOT copied).  */
void
aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  struct elf_obj_sy *srcelf = symbol_get_obj (src);
  struct elf_obj_sy *destelf = symbol_get_obj (dest);
  /* If size is unset, copy size from src.  Because we don't track whether
     .size has been used, we can't differentiate .size dest, 0 from the case
     where dest's size is unset.  */
  if (!destelf->size && S_GET_SIZE (dest) == 0)
    {
      if (srcelf->size)
	{
	  /* Deep-copy the size expression so DEST owns its own copy.  */
	  destelf->size = XNEW (expressionS);
	  *destelf->size = *srcelf->size;
	}
      S_SET_SIZE (dest, S_GET_SIZE (src));
    }
}
10409 #endif