]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
f023e5b0a283309bf3183ec83187f53e4b0372da
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2022 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* SME horizontal or vertical slice indicator, encoded in "V".
103 Values:
104 0 - Horizontal
105 1 - vertical
106 */
107 enum sme_hv_slice
108 {
109 HV_horizontal = 0,
110 HV_vertical = 1
111 };
112
/* Bits for DEFINED field in vector_type_el.  */
#define NTA_HASTYPE     1	/* An element type was parsed.  */
#define NTA_HASINDEX    2	/* An element index was parsed/is expected.  */
#define NTA_HASVARWIDTH 4	/* SVE-style variable-width register.  */

/* Shape and element-index information parsed from a vector register
   operand such as "v0.4s[1]" or "z3.d".  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type.  */
  unsigned char defined;	/* Mask of the NTA_* bits above.  */
  unsigned width;		/* Number of elements; 0 when variable.  */
  int64_t index;		/* Element index, valid when NTA_HASINDEX.  */
};
125
/* Flag bit for struct reloc FLAGS: the operand carried an explicit
   shift (per the name; set by operand parsers elsewhere in this file).  */
#define FIXUP_F_HAS_EXPLICIT_SHIFT	0x00000001

/* Relocation/fixup information gathered while parsing one instruction.  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code.  */
  expressionS exp;			/* Expression to be resolved.  */
  int pc_rel;				/* Non-zero for PC-relative fixups.  */
  enum aarch64_opnd opnd;		/* Operand the fixup applies to.  */
  uint32_t flags;			/* FIXUP_F_* flags.  */
  unsigned need_libopcodes_p : 1;	/* Per the name: fixup needs
					   libopcodes support.  */
};
137
138 struct aarch64_instruction
139 {
140 /* libopcodes structure for instruction intermediate representation. */
141 aarch64_inst base;
142 /* Record assembly errors found during the parsing. */
143 struct
144 {
145 enum aarch64_operand_error_kind kind;
146 const char *error;
147 } parsing_error;
148 /* The condition that appears in the assembly line. */
149 int cond;
150 /* Relocation information (including the GAS internal fixup). */
151 struct reloc reloc;
152 /* Need to generate an immediate in the literal pool. */
153 unsigned gen_lit_pool : 1;
154 };
155
156 typedef struct aarch64_instruction aarch64_instruction;
157
158 static aarch64_instruction inst;
159
160 static bool parse_operands (char *, const aarch64_opcode *);
161 static bool programmer_friendly_fixup (aarch64_instruction *);
162
163 /* Diagnostics inline function utilities.
164
165 These are lightweight utilities which should only be called by parse_operands
166 and other parsers. GAS processes each assembly line by parsing it against
167 instruction template(s), in the case of multiple templates (for the same
168 mnemonic name), those templates are tried one by one until one succeeds or
169 all fail. An assembly line may fail a few templates before being
170 successfully parsed; an error saved here in most cases is not a user error
171 but an error indicating the current template is not the right template.
172 Therefore it is very important that errors can be saved at a low cost during
173 the parsing; we don't want to slow down the whole parsing by recording
174 non-user errors in detail.
175
176 Remember that the objective is to help GAS pick up the most appropriate
177 error message in the case of multiple templates, e.g. FMOV which has 8
178 templates. */
179
/* Reset the per-instruction parsing error record to "no error".  */
static inline void
clear_error (void)
{
  inst.parsing_error.kind = AARCH64_OPDE_NIL;
  inst.parsing_error.error = NULL;
}

/* Return TRUE if a parsing error is currently recorded.  */
static inline bool
error_p (void)
{
  return inst.parsing_error.kind != AARCH64_OPDE_NIL;
}

/* Return the message of the recorded error; may be NULL, in which case
   the diagnostic is composed elsewhere (see set_default_error).  */
static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}

/* Return the kind of the recorded error (AARCH64_OPDE_NIL if none).  */
static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}

/* Record an error of kind KIND with message ERROR, overwriting any
   previously recorded one.  */
static inline void
set_error (enum aarch64_operand_error_kind kind, const char *error)
{
  inst.parsing_error.kind = kind;
  inst.parsing_error.error = error;
}

/* Record a recoverable error; other templates may still be tried.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}

/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}

/* Record a syntax error, overwriting any earlier one.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}

/* Record a syntax error only if none is recorded yet; the earliest
   error is usually the most meaningful (see block comment above).  */
static inline void
set_first_syntax_error (const char *error)
{
  if (! error_p ())
    set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}

/* Record a fatal syntax error: no further template should be tried
   for this assembly line.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
244 \f
245 /* Return value for certain parsers when the parsing fails; those parsers
246 return the information of the parsed result, e.g. register number, on
247 success. */
248 #define PARSE_FAIL -1
249
250 /* This is an invalid condition code that means no conditional field is
251 present. */
252 #define COND_ALWAYS 0x10
253
254 typedef struct
255 {
256 const char *template;
257 uint32_t value;
258 } asm_nzcv;
259
260 struct reloc_entry
261 {
262 char *name;
263 bfd_reloc_code_real_type reloc;
264 };
265
266 /* Macros to define the register types and masks for the purpose
267 of parsing. */
268
269 #undef AARCH64_REG_TYPES
270 #define AARCH64_REG_TYPES \
271 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
272 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
273 BASIC_REG_TYPE(SP_32) /* wsp */ \
274 BASIC_REG_TYPE(SP_64) /* sp */ \
275 BASIC_REG_TYPE(Z_32) /* wzr */ \
276 BASIC_REG_TYPE(Z_64) /* xzr */ \
277 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
278 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
279 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
280 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
281 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
282 BASIC_REG_TYPE(VN) /* v[0-31] */ \
283 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
284 BASIC_REG_TYPE(PN) /* p[0-15] */ \
285 BASIC_REG_TYPE(ZA) /* za[0-15] */ \
286 BASIC_REG_TYPE(ZAH) /* za[0-15]h */ \
287 BASIC_REG_TYPE(ZAV) /* za[0-15]v */ \
288 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
289 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
290 /* Typecheck: same, plus SVE registers. */ \
291 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
292 | REG_TYPE(ZN)) \
293 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
294 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
295 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
296 /* Typecheck: same, plus SVE registers. */ \
297 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
298 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
299 | REG_TYPE(ZN)) \
300 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
301 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
302 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
303 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
304 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
306 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
307 /* Typecheck: any [BHSDQ]P FP. */ \
308 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
309 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
310 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
311 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
313 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
314 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
315 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
316 be used for SVE instructions, since Zn and Pn are valid symbols \
317 in other contexts. */ \
318 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
320 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
321 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
322 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
323 | REG_TYPE(ZN) | REG_TYPE(PN)) \
324 /* Any integer register; used for error messages only. */ \
325 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
326 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
327 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
328 /* Pseudo type to mark the end of the enumerator sequence. */ \
329 BASIC_REG_TYPE(MAX)
330
331 #undef BASIC_REG_TYPE
332 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
333 #undef MULTI_REG_TYPE
334 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
335
336 /* Register type enumerators. */
337 typedef enum aarch64_reg_type_
338 {
339 /* A list of REG_TYPE_*. */
340 AARCH64_REG_TYPES
341 } aarch64_reg_type;
342
343 #undef BASIC_REG_TYPE
344 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
345 #undef REG_TYPE
346 #define REG_TYPE(T) (1 << REG_TYPE_##T)
347 #undef MULTI_REG_TYPE
348 #define MULTI_REG_TYPE(T,V) V,
349
350 /* Structure for a hash table entry for a register. */
351 typedef struct
352 {
353 const char *name;
354 unsigned char number;
355 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
356 unsigned char builtin;
357 } reg_entry;
358
359 /* Values indexed by aarch64_reg_type to assist the type checking. */
360 static const unsigned reg_type_masks[] =
361 {
362 AARCH64_REG_TYPES
363 };
364
365 #undef BASIC_REG_TYPE
366 #undef REG_TYPE
367 #undef MULTI_REG_TYPE
368 #undef AARCH64_REG_TYPES
369
370 /* Diagnostics used when we don't get a register of the expected type.
371 Note: this has to synchronized with aarch64_reg_type definitions
372 above. */
373 static const char *
374 get_reg_expected_msg (aarch64_reg_type reg_type)
375 {
376 const char *msg;
377
378 switch (reg_type)
379 {
380 case REG_TYPE_R_32:
381 msg = N_("integer 32-bit register expected");
382 break;
383 case REG_TYPE_R_64:
384 msg = N_("integer 64-bit register expected");
385 break;
386 case REG_TYPE_R_N:
387 msg = N_("integer register expected");
388 break;
389 case REG_TYPE_R64_SP:
390 msg = N_("64-bit integer or SP register expected");
391 break;
392 case REG_TYPE_SVE_BASE:
393 msg = N_("base register expected");
394 break;
395 case REG_TYPE_R_Z:
396 msg = N_("integer or zero register expected");
397 break;
398 case REG_TYPE_SVE_OFFSET:
399 msg = N_("offset register expected");
400 break;
401 case REG_TYPE_R_SP:
402 msg = N_("integer or SP register expected");
403 break;
404 case REG_TYPE_R_Z_SP:
405 msg = N_("integer, zero or SP register expected");
406 break;
407 case REG_TYPE_FP_B:
408 msg = N_("8-bit SIMD scalar register expected");
409 break;
410 case REG_TYPE_FP_H:
411 msg = N_("16-bit SIMD scalar or floating-point half precision "
412 "register expected");
413 break;
414 case REG_TYPE_FP_S:
415 msg = N_("32-bit SIMD scalar or floating-point single precision "
416 "register expected");
417 break;
418 case REG_TYPE_FP_D:
419 msg = N_("64-bit SIMD scalar or floating-point double precision "
420 "register expected");
421 break;
422 case REG_TYPE_FP_Q:
423 msg = N_("128-bit SIMD scalar or floating-point quad precision "
424 "register expected");
425 break;
426 case REG_TYPE_R_Z_BHSDQ_V:
427 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
428 msg = N_("register expected");
429 break;
430 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
431 msg = N_("SIMD scalar or floating-point register expected");
432 break;
433 case REG_TYPE_VN: /* any V reg */
434 msg = N_("vector register expected");
435 break;
436 case REG_TYPE_ZN:
437 msg = N_("SVE vector register expected");
438 break;
439 case REG_TYPE_PN:
440 msg = N_("SVE predicate register expected");
441 break;
442 default:
443 as_fatal (_("invalid register type %d"), reg_type);
444 }
445 return msg;
446 }
447
448 /* Some well known registers that we refer to directly elsewhere. */
449 #define REG_SP 31
450 #define REG_ZR 31
451
452 /* Instructions take 4 bytes in the object file. */
453 #define INSN_SIZE 4
454
455 static htab_t aarch64_ops_hsh;
456 static htab_t aarch64_cond_hsh;
457 static htab_t aarch64_shift_hsh;
458 static htab_t aarch64_sys_regs_hsh;
459 static htab_t aarch64_pstatefield_hsh;
460 static htab_t aarch64_sys_regs_ic_hsh;
461 static htab_t aarch64_sys_regs_dc_hsh;
462 static htab_t aarch64_sys_regs_at_hsh;
463 static htab_t aarch64_sys_regs_tlbi_hsh;
464 static htab_t aarch64_sys_regs_sr_hsh;
465 static htab_t aarch64_reg_hsh;
466 static htab_t aarch64_barrier_opt_hsh;
467 static htab_t aarch64_nzcv_hsh;
468 static htab_t aarch64_pldop_hsh;
469 static htab_t aarch64_hint_opt_hsh;
470
471 /* Stuff needed to resolve the label ambiguity
472 As:
473 ...
474 label: <insn>
475 may differ from:
476 ...
477 label:
478 <insn> */
479
480 static symbolS *last_label_seen;
481
482 /* Literal pool structure. Held on a per-section
483 and per-sub-section basis. */
484
485 #define MAX_LITERAL_POOL_SIZE 1024
486 typedef struct literal_expression
487 {
488 expressionS exp;
489 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
490 LITTLENUM_TYPE * bignum;
491 } literal_expression;
492
493 typedef struct literal_pool
494 {
495 literal_expression literals[MAX_LITERAL_POOL_SIZE];
496 unsigned int next_free_entry;
497 unsigned int id;
498 symbolS *symbol;
499 segT section;
500 subsegT sub_section;
501 int size;
502 struct literal_pool *next;
503 } literal_pool;
504
505 /* Pointer to a linked list of literal pools. */
506 static literal_pool *list_of_pools = NULL;
507 \f
508 /* Pure syntax. */
509
510 /* This array holds the chars that always start a comment. If the
511 pre-processor is disabled, these aren't very useful. */
512 const char comment_chars[] = "";
513
514 /* This array holds the chars that only start a comment at the beginning of
515 a line. If the line seems to have the form '# 123 filename'
516 .line and .file directives will appear in the pre-processed output. */
517 /* Note that input_file.c hand checks for '#' at the beginning of the
518 first line of the input file. This is because the compiler outputs
519 #NO_APP at the beginning of its output. */
520 /* Also note that comments like this one will always work. */
521 const char line_comment_chars[] = "#";
522
523 const char line_separator_chars[] = ";";
524
525 /* Chars that can be used to separate mant
526 from exp in floating point numbers. */
527 const char EXP_CHARS[] = "eE";
528
529 /* Chars that mean this number is a floating point constant. */
530 /* As in 0f12.456 */
531 /* or 0d1.2345e12 */
532
533 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
534
535 /* Prefix character that indicates the start of an immediate value. */
536 #define is_immediate_prefix(C) ((C) == '#')
537
538 /* Separator character handling. */
539
540 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
541
/* If the character at **STR equals C, consume it and return TRUE;
   otherwise leave *STR untouched and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
553
554 #define skip_past_comma(str) skip_past_char (str, ',')
555
556 /* Arithmetic expressions (possibly involving symbols). */
557
558 static bool in_aarch64_get_expression = false;
559
560 /* Third argument to aarch64_get_expression. */
561 #define GE_NO_PREFIX false
562 #define GE_OPT_PREFIX true
563
564 /* Fourth argument to aarch64_get_expression. */
565 #define ALLOW_ABSENT false
566 #define REJECT_ABSENT true
567
/* Return TRUE if the string pointed to by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.  */

static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  /* Consume an optional '#' immediate prefix; remember whether it was
     there so that errors after it can be made fatal (a '#' promises an
     immediate operand).  */
  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () parses from input_line_pointer, so temporarily point
     it at our string and restore it when done.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Tell md_operand () we are the caller, so it marks bad expressions
     as O_illegal instead of something else.  */
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  /* a.out only supports a fixed set of sections for expressions.  */
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  /* Success: advance the caller's pointer past the expression and
     restore input_line_pointer.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
634
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Delegate to the generic IEEE converter, honouring the target's
     endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
645
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * exp)
{
  /* Only intervene when called on behalf of aarch64_get_expression;
     O_illegal lets that caller produce the diagnostic itself.  */
  if (in_aarch64_get_expression)
    exp->X_op = O_illegal;
}
654
655 /* Immediate values. */
656
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful.  Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *error)
{
  /* Record ERROR only when no earlier error exists.  */
  if (! error_p ())
    set_syntax_error (error);
}
668
/* Similar to first_error, but this function accepts a formatted error
   message.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  /* Fixed size of the static message buffer below.  */
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* Debug-build check that the message fit without truncation and
	 that formatting did not fail.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
693
694 /* Register parsing. */
695
696 /* Generic register parser which is called by other specialized
697 register parsers.
698 CCP points to what should be the beginning of a register name.
699 If it is indeed a valid register name, advance CCP over it and
700 return the reg_entry structure; otherwise return NULL.
701 It does not issue diagnostics. */
702
703 static reg_entry *
704 parse_reg (char **ccp)
705 {
706 char *start = *ccp;
707 char *p;
708 reg_entry *reg;
709
710 #ifdef REGISTER_PREFIX
711 if (*start != REGISTER_PREFIX)
712 return NULL;
713 start++;
714 #endif
715
716 p = start;
717 if (!ISALPHA (*p) || !is_name_beginner (*p))
718 return NULL;
719
720 do
721 p++;
722 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
723
724 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
725
726 if (!reg)
727 return NULL;
728
729 *ccp = p;
730 return reg;
731 }
732
733 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
734 return FALSE. */
735 static bool
736 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
737 {
738 return (reg_type_masks[type] & (1 << reg->type)) != 0;
739 }
740
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit forms: Wn, WSP, WZR.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit forms: Xn, SP, XZR.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    /* SVE vector register used as base/offset: must be permitted by
       REG_TYPE and must carry a ".s" or ".d" suffix.  */
    case REG_TYPE_ZN:
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Skip over the two-character ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
798
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept any 32-bit or 64-bit integer register, including SP/WSP and
     the zero registers, but no SVE registers.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
810
811 /* Parse the qualifier of a vector register or vector element of type
812 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
813 succeeds; otherwise return FALSE.
814
815 Accept only one occurrence of:
816 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
817 b h s d q */
818 static bool
819 parse_vector_type_for_operand (aarch64_reg_type reg_type,
820 struct vector_type_el *parsed_type, char **str)
821 {
822 char *ptr = *str;
823 unsigned width;
824 unsigned element_size;
825 enum vector_el_type type;
826
827 /* skip '.' */
828 gas_assert (*ptr == '.');
829 ptr++;
830
831 if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
832 {
833 width = 0;
834 goto elt_size;
835 }
836 width = strtoul (ptr, &ptr, 10);
837 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
838 {
839 first_error_fmt (_("bad size %d in vector width specifier"), width);
840 return false;
841 }
842
843 elt_size:
844 switch (TOLOWER (*ptr))
845 {
846 case 'b':
847 type = NT_b;
848 element_size = 8;
849 break;
850 case 'h':
851 type = NT_h;
852 element_size = 16;
853 break;
854 case 's':
855 type = NT_s;
856 element_size = 32;
857 break;
858 case 'd':
859 type = NT_d;
860 element_size = 64;
861 break;
862 case 'q':
863 if (reg_type == REG_TYPE_ZN || width == 1)
864 {
865 type = NT_q;
866 element_size = 128;
867 break;
868 }
869 /* fall through. */
870 default:
871 if (*ptr != '\0')
872 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
873 else
874 first_error (_("missing element size"));
875 return false;
876 }
877 if (width != 0 && width * element_size != 64
878 && width * element_size != 128
879 && !(width == 2 && element_size == 16)
880 && !(width == 4 && element_size == 8))
881 {
882 first_error_fmt (_
883 ("invalid element size %d and vector size combination %c"),
884 width, *ptr);
885 return false;
886 }
887 ptr++;
888
889 parsed_type->type = type;
890 parsed_type->width = width;
891
892 *str = ptr;
893
894 return true;
895 }
896
897 /* *STR contains an SVE zero/merge predication suffix. Parse it into
898 *PARSED_TYPE and point *STR at the end of the suffix. */
899
900 static bool
901 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
902 {
903 char *ptr = *str;
904
905 /* Skip '/'. */
906 gas_assert (*ptr == '/');
907 ptr++;
908 switch (TOLOWER (*ptr))
909 {
910 case 'z':
911 parsed_type->type = NT_zero;
912 break;
913 case 'm':
914 parsed_type->type = NT_merge;
915 break;
916 default:
917 if (*ptr != '\0' && *ptr != ',')
918 first_error_fmt (_("unexpected character `%c' in predication type"),
919 *ptr);
920 else
921 first_error (_("missing predication type"));
922 return false;
923 }
924 parsed_type->width = 0;
925 *str = ptr + 1;
926 return true;
927 }
928
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed to by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bool in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start with "no shape or index information".  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE down to the concrete type of the register found.  */
  type = reg->type;

  /* A '.' introduces a shape suffix (e.g. v0.4s); a '/' after a
     predicate register introduces a predication suffix (p0/z, p0/m).  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* An optional '[index]' element selector.  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1064
1065 /* Parse register.
1066
1067 Return the register number on success; return PARSE_FAIL otherwise.
1068
1069 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1070 the register (e.g. NEON double or quad reg when either has been requested).
1071
1072 If this is a NEON vector register with additional type information, fill
1073 in the struct pointed to by VECTYPE (if non-NULL).
1074
1075 This parser does not handle register list. */
1076
1077 static int
1078 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1079 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1080 {
1081 struct vector_type_el atype;
1082 char *str = *ccp;
1083 int reg = parse_typed_reg (&str, type, rtype, &atype,
1084 /*in_reg_list= */ false);
1085
1086 if (reg == PARSE_FAIL)
1087 return PARSE_FAIL;
1088
1089 if (vectype)
1090 *vectype = atype;
1091
1092 *ccp = str;
1093
1094 return reg;
1095 }
1096
1097 static inline bool
1098 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1099 {
1100 return
1101 e1.type == e2.type
1102 && e1.defined == e2.defined
1103 && e1.width == e2.width && e1.index == e2.index;
1104 }
1105
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  /* Initialise the "shape of the first register" to an invalid value;
     the first successfully parsed register overwrites it and later
     registers are checked against it.  */
  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      /* IN_RANGE is set at the bottom of the loop when a '-' follows
	 the previous register, i.e. we are parsing the upper bound of
	 a "v0-v3"-style range.  */
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;	/* previous register is the lower bound */
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ true);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* A range must be non-decreasing, e.g. "v3-v0" is invalid.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  /* Skip the lower bound: it was already accumulated when it
	     was parsed as a plain register on the previous iteration.  */
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Accumulate every register of the (possibly one-element) range
	 into RET_VAL, 5 bits per register.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue on ',' (next register) or '-' (range upper bound); the
     comma operator sets IN_RANGE before testing for '-'.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* If any element carried an index, a trailing [<index>] applying to
     the whole list is expected, e.g. "{v0.b, v1.b}[3]".  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1266
1267 /* Directives: register aliases. */
1268
1269 static reg_entry *
1270 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1271 {
1272 reg_entry *new;
1273 const char *name;
1274
1275 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1276 {
1277 if (new->builtin)
1278 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1279 str);
1280
1281 /* Only warn about a redefinition if it's not defined as the
1282 same register. */
1283 else if (new->number != number || new->type != type)
1284 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1285
1286 return NULL;
1287 }
1288
1289 name = xstrdup (str);
1290 new = XNEW (reg_entry);
1291
1292 new->name = name;
1293 new->number = number;
1294 new->type = type;
1295 new->builtin = false;
1296
1297 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1298
1299 return new;
1300 }
1301
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.

   NEWNAME points to the alias being defined and P just past it, at what
   should be the " .req " separator.  On success the alias is registered
   under the name as written plus, where they differ, all-upper-case and
   all-lower-case variants.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      /* NBUF is converted in place; only insert the case variants when
	 they actually differ from the name as written.  */
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1381
/* Handler for a bare ".req" pseudo-op.  Should never be called, as
   .req goes between the alias and the register name, not at the
   beginning of the line (see create_register_alias); reaching here
   means the directive was malformed.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1389
1390 /* The .unreq directive deletes an alias which was previously defined
1391 by .req. For example:
1392
1393 my_alias .req r11
1394 .unreq my_alias */
1395
1396 static void
1397 s_unreq (int a ATTRIBUTE_UNUSED)
1398 {
1399 char *name;
1400 char saved_char;
1401
1402 name = input_line_pointer;
1403
1404 while (*input_line_pointer != 0
1405 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1406 ++input_line_pointer;
1407
1408 saved_char = *input_line_pointer;
1409 *input_line_pointer = 0;
1410
1411 if (!*name)
1412 as_bad (_("invalid syntax for .unreq directive"));
1413 else
1414 {
1415 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1416
1417 if (!reg)
1418 as_bad (_("unknown register alias '%s'"), name);
1419 else if (reg->builtin)
1420 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1421 name);
1422 else
1423 {
1424 char *p;
1425 char *nbuf;
1426
1427 str_hash_delete (aarch64_reg_hsh, name);
1428 free ((char *) reg->name);
1429 free (reg);
1430
1431 /* Also locate the all upper case and all lower case versions.
1432 Do not complain if we cannot find one or the other as it
1433 was probably deleted above. */
1434
1435 nbuf = strdup (name);
1436 for (p = nbuf; *p; p++)
1437 *p = TOUPPER (*p);
1438 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1439 if (reg)
1440 {
1441 str_hash_delete (aarch64_reg_hsh, nbuf);
1442 free ((char *) reg->name);
1443 free (reg);
1444 }
1445
1446 for (p = nbuf; *p; p++)
1447 *p = TOLOWER (*p);
1448 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1449 if (reg)
1450 {
1451 str_hash_delete (aarch64_reg_hsh, nbuf);
1452 free ((char *) reg->name);
1453 free (reg);
1454 }
1455
1456 free (nbuf);
1457 }
1458 }
1459
1460 *input_line_pointer = saved_char;
1461 demand_empty_rest_of_line ();
1462 }
1463
1464 /* Directives: Instruction set selection. */
1465
1466 #ifdef OBJ_ELF
1467 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1468 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1469 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1470 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1471
/* Create a new mapping symbol for the transition to STATE, at offset
   VALUE within FRAG: "$x" for MAP_INSN, "$d" for MAP_DATA.  Any other
   state aborts.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag are created in address order.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1527
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Emit a $d symbol at offset VALUE within FRAG and a STATE symbol
   BYTES further on, first removing any mapping symbol already present
   at VALUE.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
	 frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The removed symbol was also the frag's first one.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1555
1556 static void mapping_state_2 (enum mstate state, int max_chars);
1557
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.

   Emits a new mapping symbol ($x/$d, via mapping_state_2 and
   make_mapping_symbol) whenever the section's recorded mapping state
   actually changes.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1599
1600 /* Same as mapping_state, but MAX_CHARS bytes have already been
1601 allocated. Put the mapping symbol that far back. */
1602
1603 static void
1604 mapping_state_2 (enum mstate state, int max_chars)
1605 {
1606 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1607
1608 if (!SEG_NORMAL (now_seg))
1609 return;
1610
1611 if (mapstate == state)
1612 /* The mapping symbol has already been emitted.
1613 There is nothing else to do. */
1614 return;
1615
1616 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1617 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1618 }
1619 #else
1620 #define mapping_state(x) /* nothing */
1621 #define mapping_state_2(x, y) /* nothing */
1622 #endif
1623
1624 /* Directives: sectioning and alignment. */
1625
/* Implement the .bss directive: switch to the BSS section and record
   the data mapping state.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1635
/* Implement the .even directive: align the output to a 2-byte
   boundary and record that alignment for the section.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1647
1648 /* Directives: Literal pools. */
1649
1650 static literal_pool *
1651 find_literal_pool (int size)
1652 {
1653 literal_pool *pool;
1654
1655 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1656 {
1657 if (pool->section == now_seg
1658 && pool->sub_section == now_subseg && pool->size == size)
1659 break;
1660 }
1661
1662 return pool;
1663 }
1664
/* Return the literal pool of entry size SIZE for the current section
   and subsection, creating and registering a new empty pool if none
   exists.  Also (re)creates the pool's anchor symbol when the pool is
   new or was emptied by a previous .ltorg.  */

static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = XNEW (literal_pool);
      if (!pool)
	/* NOTE(review): XNEW allocates via xmalloc, which normally
	   aborts on failure, so this path looks unreachable -- confirm
	   before relying on a NULL return here.  */
	return NULL;

      /* Currently we always put the literal pool in the current text
	 section.  If we were generating "small" model code where we
	 knew that all code and initialised data was within 1MB then
	 we could output literals to mergeable, read-only data
	 sections.  */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    &zero_address_frag, 0);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}
1709
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.

   On success *EXP is rewritten into an O_symbol expression referencing
   the pool's anchor symbol plus the entry's byte offset.  Identical
   literals are shared rather than duplicated.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match on both symbols and the addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP to reference the pool entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1769
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

/* Fill in the pre-allocated SYMBOLP with NAME (copied), SEGMENT, value
   VALU and owning FRAG, then append it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME onto the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table has been frozen is a logic error.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1820
1821
/* Implement the .ltorg/.pool directive: dump every non-empty literal
   pool (one per entry size: 4, 8 and 16 bytes) at the current output
   position, then mark the pools empty so later literals start fresh
   pools.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* One pool per entry size, 1 << 2 through 1 << 4 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* The pool contents are data, not instructions.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 byte presumably keeps the generated name out of the
	 user symbol namespace -- verify against FAKE_LABEL usage.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword */

	  if (exp->X_op == O_big)
	    {
	      /* The preserved copy is no longer needed once emitted.  */
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1880
1881 #ifdef OBJ_ELF
1882 /* Forward declarations for functions below, in the MD interface
1883 section. */
1884 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1885 static struct reloc_table_entry * find_reloc_table_entry (char **);
1886
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Implement the .word/.long (NBYTES == 4) and .xword/.dword
   (NBYTES == 8) directives: emit each comma-separated value as NBYTES
   of data.  A ':<reloc>:' suffix on a symbolic value is recognized but
   currently rejected as unimplemented.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* We are about to emit data bytes.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbolic value may carry an optional '#' and a
	     ':<reloc>:' relocation-suffix prefix.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1942
/* Mark symbol that it follows a variant PCS convention.  */

/* Implement the .variant_pcs directive: set the
   STO_AARCH64_VARIANT_PCS bit in the named symbol's ELF st_other
   field, creating the symbol if necessary.  */

static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
1965 #endif /* OBJ_ELF */
1966
/* Output a 32-bit word, but mark as an instruction.  */

/* Implement the .inst directive: emit each comma-separated constant
   expression as a 4-byte instruction word (byte-swapped when the
   target is big-endian) and record the total size for DWARF line
   information.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;		/* Number of instruction words emitted.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Tell the DWARF machinery how many instruction bytes were emitted.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2023
/* Implement the .cfi_b_key_frame directive: mark the current CFI FDE
   as using pointer-authentication key B.  */
static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2031
2032 #ifdef OBJ_ELF
2033 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2034
2035 static void
2036 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2037 {
2038 expressionS exp;
2039
2040 expression (&exp);
2041 frag_grow (4);
2042 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2043 BFD_RELOC_AARCH64_TLSDESC_ADD);
2044
2045 demand_empty_rest_of_line ();
2046 }
2047
2048 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2049
2050 static void
2051 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2052 {
2053 expressionS exp;
2054
2055 /* Since we're just labelling the code, there's no need to define a
2056 mapping symbol. */
2057 expression (&exp);
2058 /* Make sure there is enough room in this frag for the following
2059 blr. This trick only works if the blr follows immediately after
2060 the .tlsdesc directive. */
2061 frag_grow (4);
2062 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2063 BFD_RELOC_AARCH64_TLSDESC_CALL);
2064
2065 demand_empty_rest_of_line ();
2066 }
2067
2068 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2069
2070 static void
2071 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2072 {
2073 expressionS exp;
2074
2075 expression (&exp);
2076 frag_grow (4);
2077 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2078 BFD_RELOC_AARCH64_TLSDESC_LDR);
2079
2080 demand_empty_rest_of_line ();
2081 }
2082 #endif /* OBJ_ELF */
2083
2084 static void s_aarch64_arch (int);
2085 static void s_aarch64_cpu (int);
2086 static void s_aarch64_arch_extension (int);
2087
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".pool" is an alias for ".ltorg": both flush pending literals.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The argument is the data size in bytes: 4 or 8.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* Half-precision and bfloat16 floating-point constants.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2121 \f
2122
2123 /* Check whether STR points to a register name followed by a comma or the
2124 end of line; REG_TYPE indicates which register types are checked
2125 against. Return TRUE if STR is such a register name; otherwise return
2126 FALSE. The function does not intend to produce any diagnostics, but since
2127 the register parser aarch64_reg_parse, which is called by this function,
2128 does produce diagnostics, we call clear_error to clear any diagnostics
2129 that may be generated by aarch64_reg_parse.
2130 Also, the function returns FALSE directly if there is any user error
2131 present at the function entry. This prevents the existing diagnostics
2132 state from being spoiled.
2133 The function currently serves parse_constant_immediate and
2134 parse_big_immediate only. */
2135 static bool
2136 reg_name_p (char *str, aarch64_reg_type reg_type)
2137 {
2138 int reg;
2139
2140 /* Prevent the diagnostics state from being spoiled. */
2141 if (error_p ())
2142 return false;
2143
2144 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2145
2146 /* Clear the parsing error that may be set by the reg parser. */
2147 clear_error ();
2148
2149 if (reg == PARSE_FAIL)
2150 return false;
2151
2152 skip_whitespace (str);
2153 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2154 return true;
2155
2156 return false;
2157 }
2158
2159 /* Parser functions used exclusively in instruction operands. */
2160
2161 /* Parse an immediate expression which may not be constant.
2162
2163 To prevent the expression parser from pushing a register name
2164 into the symbol table as an undefined symbol, firstly a check is
2165 done to find out whether STR is a register of type REG_TYPE followed
2166 by a comma or the end of line. Return FALSE if STR is such a string. */
2167
2168 static bool
2169 parse_immediate_expression (char **str, expressionS *exp,
2170 aarch64_reg_type reg_type)
2171 {
2172 if (reg_name_p (*str, reg_type))
2173 {
2174 set_recoverable_error (_("immediate operand required"));
2175 return false;
2176 }
2177
2178 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2179
2180 if (exp->X_op == O_absent)
2181 {
2182 set_fatal_syntax_error (_("missing immediate expression"));
2183 return false;
2184 }
2185
2186 return true;
2187 }
2188
/* Constant immediate-value read function for use in insn parsing.
   STR points to the beginning of the immediate (with the optional
   leading #); *VAL receives the value.  REG_TYPE says which register
   names should be treated as registers rather than as symbolic
   immediates.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
{
  expressionS exp;

  if (! parse_immediate_expression (str, &exp, reg_type))
    return false;

  /* Only fully resolved literal constants are accepted here.  */
  if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant expression required"));
      return false;
    }

  *val = exp.X_add_number;
  return true;
}
2213
/* Pack the sign bit and bits 25:19 of the single-precision value IMM
   into the 8-bit floating-point immediate encoding:
   b[25:19] -> b[6:0], b[31] -> b[7].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0] */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31]    -> b[7]   */

  return sign | low7;
}
2220
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* A representable single-precision value has the bit pattern

	3 32222222 2221111111111
	1 09876543 21098765432109876543210
	n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~e.  */

  uint32_t expected;

  /* The 19 low-order bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Expected value of bits 25-30 ('Eeeeee'), derived from bit 30.  */
  expected = (((imm >> 30) & 0x1) == 0) ? 0x3e000000 : 0x40000000;

  return (imm & 0x7e000000) == expected;
}
2253
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision, storing the
   single-precision image in *FPWORD if so.  *FPWORD is untouched on
   failure.

   The double

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

   converts to the float

     nEeeeeee esssssss ssssssss sssssSSS     provided Eeee_eeee != 1111_1111

   where ~ is the inverse of E: the 29 low fraction bits must be zero,
   the top exponent bits must be a sign-extension of bit 62 (so the
   rebias fits), and the result must not be a single-precision Inf/NaN
   encoding.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  uint32_t high32 = (uint32_t) (imm >> 32);
  uint32_t low32 = (uint32_t) imm;

  /* The 29 low-order fraction bits must all be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* Bits 62..59 ('E~~~') must match the sign-extension pattern.  */
  uint32_t pattern = ((high32 >> 30) & 0x1) ? 0x40000000 : 0x38000000;
  if ((high32 & 0x78000000) != pattern)
    return false;

  /* Exclude exponents that would become all-ones (Inf/NaN) in a float.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (high32 & 0xc0000000)	   /* 1 n bit and 1 E bit.  */
	    | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits.  */
	    | (low32 >> 29);		   /* 3 S bits.  */
  return true;
}
2301
2302 /* Return true if we should treat OPERAND as a double-precision
2303 floating-point operand rather than a single-precision one. */
2304 static bool
2305 double_precision_operand_p (const aarch64_opnd_info *operand)
2306 {
2307 /* Check for unsuffixed SVE registers, which are allowed
2308 for LDR and STR but not in instructions that require an
2309 immediate. We get better error messages if we arbitrarily
2310 pick one size, parse the immediate normally, and then
2311 report the match failure in the normal way. */
2312 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2313 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2314 }
2315
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* The leading '#' is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* A double image must narrow to a float without loss.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision image must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name here is recoverable: the caller may retry the
	 operand as something other than an immediate.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal form: convert via the generic IEEE encoder.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).
	 Assemble it from the littlenums, most-significant first.  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2391
/* Less-generic immediate-value read function with the possibility of loading
   a big (64-bit) immediate, as required by AdvSIMD Modified immediate
   instructions.

   To prevent the expression parser from pushing a register name into the
   symbol table as an undefined symbol, a check is firstly done to find
   out whether STR is a register of type REG_TYPE followed by a comma or
   the end of line.  Return FALSE if STR is such a register.  */

static bool
parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
{
  char *ptr = *str;

  if (reg_name_p (ptr, reg_type))
    {
      set_syntax_error (_("immediate operand required"));
      return false;
    }

  /* The parsed expression is left in the global inst.reloc.exp; only a
     compile-time constant is copied into *IMM below.  */
  aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);

  if (inst.reloc.exp.X_op == O_constant)
    *imm = inst.reloc.exp.X_add_number;

  /* NOTE(review): a non-constant expression still returns TRUE with *IMM
     left untouched — presumably the caller then relies on inst.reloc.exp;
     confirm against call sites before depending on *IMM here.  */
  *str = ptr;

  return true;
}
2421
2422 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2423 if NEED_LIBOPCODES is non-zero, the fixup will need
2424 assistance from the libopcodes. */
2425
2426 static inline void
2427 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2428 const aarch64_opnd_info *operand,
2429 int need_libopcodes_p)
2430 {
2431 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2432 reloc->opnd = operand->type;
2433 if (need_libopcodes_p)
2434 reloc->need_libopcodes_p = 1;
2435 };
2436
/* Return TRUE if the instruction needs to be fixed up later internally by
   the GAS; otherwise return FALSE.  */

static inline bool
aarch64_gas_internal_fixup_p (void)
{
  /* Tests the relocation state of the currently assembled instruction
     (global INST).  */
  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
2445
2446 /* Assign the immediate value to the relevant field in *OPERAND if
2447 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2448 needs an internal fixup in a later stage.
2449 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2450 IMM.VALUE that may get assigned with the constant. */
2451 static inline void
2452 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2453 aarch64_opnd_info *operand,
2454 int addr_off_p,
2455 int need_libopcodes_p,
2456 int skip_p)
2457 {
2458 if (reloc->exp.X_op == O_constant)
2459 {
2460 if (addr_off_p)
2461 operand->addr.offset.imm = reloc->exp.X_add_number;
2462 else
2463 operand->imm.value = reloc->exp.X_add_number;
2464 reloc->type = BFD_RELOC_UNUSED;
2465 }
2466 else
2467 {
2468 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2469 /* Tell libopcodes to ignore this operand or not. This is helpful
2470 when one of the operands needs to be fixed up later but we need
2471 libopcodes to check the other operands. */
2472 operand->skip = skip_p;
2473 }
2474 }
2475
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;		/* Modifier text, without the colons.  */
  int pc_rel;			/* Non-zero if PC-relative.  */
  /* The relocation code selected for each instruction class the
     modifier may be attached to; 0 means the modifier is not allowed
     on that class of instruction.  */
  bfd_reloc_code_real_type adr_type;		/* ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* ADRP.  */
  bfd_reloc_code_real_type movw_type;		/* MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;		/* Load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* LDR (literal).  */
};
2495
2496 static struct reloc_table_entry reloc_table[] =
2497 {
2498 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2499 {"lo12", 0,
2500 0, /* adr_type */
2501 0,
2502 0,
2503 BFD_RELOC_AARCH64_ADD_LO12,
2504 BFD_RELOC_AARCH64_LDST_LO12,
2505 0},
2506
2507 /* Higher 21 bits of pc-relative page offset: ADRP */
2508 {"pg_hi21", 1,
2509 0, /* adr_type */
2510 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2511 0,
2512 0,
2513 0,
2514 0},
2515
2516 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2517 {"pg_hi21_nc", 1,
2518 0, /* adr_type */
2519 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2520 0,
2521 0,
2522 0,
2523 0},
2524
2525 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2526 {"abs_g0", 0,
2527 0, /* adr_type */
2528 0,
2529 BFD_RELOC_AARCH64_MOVW_G0,
2530 0,
2531 0,
2532 0},
2533
2534 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2535 {"abs_g0_s", 0,
2536 0, /* adr_type */
2537 0,
2538 BFD_RELOC_AARCH64_MOVW_G0_S,
2539 0,
2540 0,
2541 0},
2542
2543 /* Less significant bits 0-15 of address/value: MOVK, no check */
2544 {"abs_g0_nc", 0,
2545 0, /* adr_type */
2546 0,
2547 BFD_RELOC_AARCH64_MOVW_G0_NC,
2548 0,
2549 0,
2550 0},
2551
2552 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2553 {"abs_g1", 0,
2554 0, /* adr_type */
2555 0,
2556 BFD_RELOC_AARCH64_MOVW_G1,
2557 0,
2558 0,
2559 0},
2560
2561 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2562 {"abs_g1_s", 0,
2563 0, /* adr_type */
2564 0,
2565 BFD_RELOC_AARCH64_MOVW_G1_S,
2566 0,
2567 0,
2568 0},
2569
2570 /* Less significant bits 16-31 of address/value: MOVK, no check */
2571 {"abs_g1_nc", 0,
2572 0, /* adr_type */
2573 0,
2574 BFD_RELOC_AARCH64_MOVW_G1_NC,
2575 0,
2576 0,
2577 0},
2578
2579 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2580 {"abs_g2", 0,
2581 0, /* adr_type */
2582 0,
2583 BFD_RELOC_AARCH64_MOVW_G2,
2584 0,
2585 0,
2586 0},
2587
2588 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2589 {"abs_g2_s", 0,
2590 0, /* adr_type */
2591 0,
2592 BFD_RELOC_AARCH64_MOVW_G2_S,
2593 0,
2594 0,
2595 0},
2596
2597 /* Less significant bits 32-47 of address/value: MOVK, no check */
2598 {"abs_g2_nc", 0,
2599 0, /* adr_type */
2600 0,
2601 BFD_RELOC_AARCH64_MOVW_G2_NC,
2602 0,
2603 0,
2604 0},
2605
2606 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2607 {"abs_g3", 0,
2608 0, /* adr_type */
2609 0,
2610 BFD_RELOC_AARCH64_MOVW_G3,
2611 0,
2612 0,
2613 0},
2614
2615 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2616 {"prel_g0", 1,
2617 0, /* adr_type */
2618 0,
2619 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2620 0,
2621 0,
2622 0},
2623
2624 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2625 {"prel_g0_nc", 1,
2626 0, /* adr_type */
2627 0,
2628 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2629 0,
2630 0,
2631 0},
2632
2633 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2634 {"prel_g1", 1,
2635 0, /* adr_type */
2636 0,
2637 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2638 0,
2639 0,
2640 0},
2641
2642 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2643 {"prel_g1_nc", 1,
2644 0, /* adr_type */
2645 0,
2646 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2647 0,
2648 0,
2649 0},
2650
2651 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2652 {"prel_g2", 1,
2653 0, /* adr_type */
2654 0,
2655 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2656 0,
2657 0,
2658 0},
2659
2660 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2661 {"prel_g2_nc", 1,
2662 0, /* adr_type */
2663 0,
2664 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2665 0,
2666 0,
2667 0},
2668
2669 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2670 {"prel_g3", 1,
2671 0, /* adr_type */
2672 0,
2673 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2674 0,
2675 0,
2676 0},
2677
2678 /* Get to the page containing GOT entry for a symbol. */
2679 {"got", 1,
2680 0, /* adr_type */
2681 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2682 0,
2683 0,
2684 0,
2685 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2686
2687 /* 12 bit offset into the page containing GOT entry for that symbol. */
2688 {"got_lo12", 0,
2689 0, /* adr_type */
2690 0,
2691 0,
2692 0,
2693 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2694 0},
2695
2696 /* 0-15 bits of address/value: MOVk, no check. */
2697 {"gotoff_g0_nc", 0,
2698 0, /* adr_type */
2699 0,
2700 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2701 0,
2702 0,
2703 0},
2704
2705 /* Most significant bits 16-31 of address/value: MOVZ. */
2706 {"gotoff_g1", 0,
2707 0, /* adr_type */
2708 0,
2709 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2710 0,
2711 0,
2712 0},
2713
2714 /* 15 bit offset into the page containing GOT entry for that symbol. */
2715 {"gotoff_lo15", 0,
2716 0, /* adr_type */
2717 0,
2718 0,
2719 0,
2720 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2721 0},
2722
2723 /* Get to the page containing GOT TLS entry for a symbol */
2724 {"gottprel_g0_nc", 0,
2725 0, /* adr_type */
2726 0,
2727 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2728 0,
2729 0,
2730 0},
2731
2732 /* Get to the page containing GOT TLS entry for a symbol */
2733 {"gottprel_g1", 0,
2734 0, /* adr_type */
2735 0,
2736 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2737 0,
2738 0,
2739 0},
2740
2741 /* Get to the page containing GOT TLS entry for a symbol */
2742 {"tlsgd", 0,
2743 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2744 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2745 0,
2746 0,
2747 0,
2748 0},
2749
2750 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2751 {"tlsgd_lo12", 0,
2752 0, /* adr_type */
2753 0,
2754 0,
2755 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2756 0,
2757 0},
2758
2759 /* Lower 16 bits address/value: MOVk. */
2760 {"tlsgd_g0_nc", 0,
2761 0, /* adr_type */
2762 0,
2763 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2764 0,
2765 0,
2766 0},
2767
2768 /* Most significant bits 16-31 of address/value: MOVZ. */
2769 {"tlsgd_g1", 0,
2770 0, /* adr_type */
2771 0,
2772 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2773 0,
2774 0,
2775 0},
2776
2777 /* Get to the page containing GOT TLS entry for a symbol */
2778 {"tlsdesc", 0,
2779 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2780 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2781 0,
2782 0,
2783 0,
2784 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2785
2786 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2787 {"tlsdesc_lo12", 0,
2788 0, /* adr_type */
2789 0,
2790 0,
2791 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2792 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2793 0},
2794
2795 /* Get to the page containing GOT TLS entry for a symbol.
2796 The same as GD, we allocate two consecutive GOT slots
2797 for module index and module offset, the only difference
2798 with GD is the module offset should be initialized to
2799 zero without any outstanding runtime relocation. */
2800 {"tlsldm", 0,
2801 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2802 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2803 0,
2804 0,
2805 0,
2806 0},
2807
2808 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2809 {"tlsldm_lo12_nc", 0,
2810 0, /* adr_type */
2811 0,
2812 0,
2813 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2814 0,
2815 0},
2816
2817 /* 12 bit offset into the module TLS base address. */
2818 {"dtprel_lo12", 0,
2819 0, /* adr_type */
2820 0,
2821 0,
2822 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2823 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2824 0},
2825
2826 /* Same as dtprel_lo12, no overflow check. */
2827 {"dtprel_lo12_nc", 0,
2828 0, /* adr_type */
2829 0,
2830 0,
2831 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2832 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2833 0},
2834
2835 /* bits[23:12] of offset to the module TLS base address. */
2836 {"dtprel_hi12", 0,
2837 0, /* adr_type */
2838 0,
2839 0,
2840 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2841 0,
2842 0},
2843
2844 /* bits[15:0] of offset to the module TLS base address. */
2845 {"dtprel_g0", 0,
2846 0, /* adr_type */
2847 0,
2848 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2849 0,
2850 0,
2851 0},
2852
2853 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2854 {"dtprel_g0_nc", 0,
2855 0, /* adr_type */
2856 0,
2857 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2858 0,
2859 0,
2860 0},
2861
2862 /* bits[31:16] of offset to the module TLS base address. */
2863 {"dtprel_g1", 0,
2864 0, /* adr_type */
2865 0,
2866 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2867 0,
2868 0,
2869 0},
2870
2871 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2872 {"dtprel_g1_nc", 0,
2873 0, /* adr_type */
2874 0,
2875 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2876 0,
2877 0,
2878 0},
2879
2880 /* bits[47:32] of offset to the module TLS base address. */
2881 {"dtprel_g2", 0,
2882 0, /* adr_type */
2883 0,
2884 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2885 0,
2886 0,
2887 0},
2888
2889 /* Lower 16 bit offset into GOT entry for a symbol */
2890 {"tlsdesc_off_g0_nc", 0,
2891 0, /* adr_type */
2892 0,
2893 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2894 0,
2895 0,
2896 0},
2897
2898 /* Higher 16 bit offset into GOT entry for a symbol */
2899 {"tlsdesc_off_g1", 0,
2900 0, /* adr_type */
2901 0,
2902 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2903 0,
2904 0,
2905 0},
2906
2907 /* Get to the page containing GOT TLS entry for a symbol */
2908 {"gottprel", 0,
2909 0, /* adr_type */
2910 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2911 0,
2912 0,
2913 0,
2914 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2915
2916 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2917 {"gottprel_lo12", 0,
2918 0, /* adr_type */
2919 0,
2920 0,
2921 0,
2922 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2923 0},
2924
2925 /* Get tp offset for a symbol. */
2926 {"tprel", 0,
2927 0, /* adr_type */
2928 0,
2929 0,
2930 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2931 0,
2932 0},
2933
2934 /* Get tp offset for a symbol. */
2935 {"tprel_lo12", 0,
2936 0, /* adr_type */
2937 0,
2938 0,
2939 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2940 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2941 0},
2942
2943 /* Get tp offset for a symbol. */
2944 {"tprel_hi12", 0,
2945 0, /* adr_type */
2946 0,
2947 0,
2948 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2949 0,
2950 0},
2951
2952 /* Get tp offset for a symbol. */
2953 {"tprel_lo12_nc", 0,
2954 0, /* adr_type */
2955 0,
2956 0,
2957 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2958 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2959 0},
2960
2961 /* Most significant bits 32-47 of address/value: MOVZ. */
2962 {"tprel_g2", 0,
2963 0, /* adr_type */
2964 0,
2965 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2966 0,
2967 0,
2968 0},
2969
2970 /* Most significant bits 16-31 of address/value: MOVZ. */
2971 {"tprel_g1", 0,
2972 0, /* adr_type */
2973 0,
2974 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2975 0,
2976 0,
2977 0},
2978
2979 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2980 {"tprel_g1_nc", 0,
2981 0, /* adr_type */
2982 0,
2983 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2984 0,
2985 0,
2986 0},
2987
2988 /* Most significant bits 0-15 of address/value: MOVZ. */
2989 {"tprel_g0", 0,
2990 0, /* adr_type */
2991 0,
2992 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2993 0,
2994 0,
2995 0},
2996
2997 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2998 {"tprel_g0_nc", 0,
2999 0, /* adr_type */
3000 0,
3001 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3002 0,
3003 0,
3004 0},
3005
3006 /* 15bit offset from got entry to base address of GOT table. */
3007 {"gotpage_lo15", 0,
3008 0,
3009 0,
3010 0,
3011 0,
3012 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3013 0},
3014
3015 /* 14bit offset from got entry to base address of GOT table. */
3016 {"gotpage_lo14", 0,
3017 0,
3018 0,
3019 0,
3020 0,
3021 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3022 0},
3023 };
3024
3025 /* Given the address of a pointer pointing to the textual name of a
3026 relocation as may appear in assembler source, attempt to find its
3027 details in reloc_table. The pointer will be updated to the character
3028 after the trailing colon. On failure, NULL will be returned;
3029 otherwise return the reloc_table_entry. */
3030
3031 static struct reloc_table_entry *
3032 find_reloc_table_entry (char **str)
3033 {
3034 unsigned int i;
3035 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3036 {
3037 int length = strlen (reloc_table[i].name);
3038
3039 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3040 && (*str)[length] == ':')
3041 {
3042 *str += (length + 1);
3043 return &reloc_table[i];
3044 }
3045 }
3046
3047 return NULL;
3048 }
3049
/* Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK.  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    /* GOT-, page- and TLS-related relocations that only the static or
       dynamic linker can resolve.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* No opinion; let the caller decide.  */
      return -1;
    }
}
3152
3153 int
3154 aarch64_force_relocation (struct fix *fixp)
3155 {
3156 int res = aarch64_force_reloc (fixp->fx_r_type);
3157
3158 if (res == -1)
3159 return generic_force_reloc (fixp);
3160 return res;
3161 }
3162
/* Mode argument to parse_shift and parser_shifter_operand.  Each caller
   passes the set of shift/extend modifiers that are legal in its
   syntactic context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3177
/* Parse a <shift> operator on an AArch64 data processing instruction.
   On success, fill in OPERAND->shifter (kind, amount, and the
   *_present flags), advance *STR past the shift, and return TRUE;
   otherwise flag a syntax error and return FALSE.  MODE restricts
   which shift/extend operators are acceptable in this context.  */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan the alphabetic operator name.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the name up in the shift-operator hash table.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL and MUL are only valid in a few specific contexts; reject them
     early so the mode switch below need not handle them everywhere.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Check the operator against what MODE permits.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* A missing amount is only acceptable for extend operators with
	 no '#' prefix, where it defaults to 0.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3350
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the arithmetic- and logical-immediate contexts accept a bare
     immediate here.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  The expression lands in the global
     inst.reloc.exp for later fixup.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Not accept any shifter for logical immediate values.
     NOTE(review): if the comma is consumed but parse_shift then fails,
     control falls through and returns TRUE with the comma already
     swallowed and a syntax error flagged — confirm this is the intended
     recovery behavior for the logical-immediate path.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3392
/* Parse a <shifter_operand> for a data processing instruction:

      <Rm>
      <Rm>, <shift>
      #<immediate>
      #<immediate>, LSL #imm

   where <shift> is handled by parse_shift above, and the last two
   cases are handled by the function above.

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand (char **str, aarch64_opnd_info *operand,
		       enum parse_shift_mode mode)
{
  const reg_entry *reg;
  aarch64_opnd_qualifier_t qualifier;
  enum aarch64_operand_class opd_class
    = aarch64_get_operand_class (operand->type);

  /* First try the register forms (<Rm> or <Rm>, <shift>).  */
  reg = aarch64_reg_parse_32_64 (str, &qualifier);
  if (reg)
    {
      /* An immediate-class operand cannot take a register at all.  */
      if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
	{
	  set_syntax_error (_("unexpected register in the immediate operand"));
	  return false;
	}

      /* Only general-purpose registers (or ZR) are acceptable here.  */
      if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
	{
	  set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
	  return false;
	}

      operand->reg.regno = reg->number;
      operand->qualifier = qualifier;

      /* Accept optional shift operation on register.  */
      if (! skip_past_comma (str))
	return true;

      if (! parse_shift (str, operand, mode))
	return false;

      return true;
    }
  else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
    {
      /* A modified-register operand requires a register; there is no
	 immediate fallback.  */
      set_syntax_error
	(_("integer register expected in the extended/shifted operand "
	   "register"));
      return false;
    }

  /* We have a shifted immediate variable.  */
  return parse_shifter_operand_imm (str, operand, mode);
}
3454
3455 /* Return TRUE on success; return FALSE otherwise. */
3456
static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip the '#' and/or ':' prefix before the modifier name.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.
	 parse_shifter_operand_imm re-parses the expression from P.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  /* No relocation modifier; hand over to the generic parser.  */
  return parse_shifter_operand (str, operand, mode);
}
3516
3517 /* Parse all forms of an address expression. Information is written
3518 to *OPERAND and/or inst.reloc.
3519
3520 The A64 instruction set has the following addressing modes:
3521
3522 Offset
3523 [base] // in SIMD ld/st structure
3524 [base{,#0}] // in ld/st exclusive
3525 [base{,#imm}]
3526 [base,Xm{,LSL #imm}]
3527 [base,Xm,SXTX {#imm}]
3528 [base,Wm,(S|U)XTW {#imm}]
3529 Pre-indexed
3530 [base]! // in ldraa/ldrab exclusive
3531 [base,#imm]!
3532 Post-indexed
3533 [base],#imm
3534 [base],Xm // in SIMD ld/st structure
3535 PC-relative (literal)
3536 label
3537 SVE:
3538 [base,#imm,MUL VL]
3539 [base,Zm.D{,LSL #imm}]
3540 [base,Zm.S,(S|U)XTW {#imm}]
3541 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3542 [Zn.S,#imm]
3543 [Zn.D,#imm]
3544 [Zn.S{, Xm}]
3545 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3546 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3547 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3548
3549 (As a convenience, the notation "=immediate" is permitted in conjunction
3550 with the pc-relative literal load instructions to automatically place an
3551 immediate value or symbolic address in a nearby literal pool and generate
3552 a hidden label which references it.)
3553
3554 Upon a successful parsing, the address structure in *OPERAND will be
3555 filled in the following way:
3556
3557 .base_regno = <base>
3558 .offset.is_reg // 1 if the offset is a register
3559 .offset.imm = <imm>
3560 .offset.regno = <Rm>
3561
3562 For different addressing modes defined in the A64 ISA:
3563
3564 Offset
3565 .pcrel=0; .preind=1; .postind=0; .writeback=0
3566 Pre-indexed
3567 .pcrel=0; .preind=1; .postind=0; .writeback=1
3568 Post-indexed
3569 .pcrel=0; .preind=0; .postind=1; .writeback=1
3570 PC-relative (literal)
3571 .pcrel=1; .preind=1; .postind=0; .writeback=0
3572
3573 The shift/extension information, if any, will be stored in .shifter.
3574 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3575 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3576 corresponding register.
3577
3578 BASE_TYPE says which types of base register should be accepted and
3579 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3580 is the type of shifter that is allowed for immediate offsets,
3581 or SHIFTED_NONE if none.
3582
3583 In all other respects, it is the caller's responsibility to check
3584 for addressing modes not supported by the instruction, and to set
3585 inst.reloc.type. */
3586
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is a PC-relative form.
	 =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* ADR uses a different reloc variant from the literal
	     load instructions.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Parse the base register; its type/qualifier is constrained by the
     caller via BASE_TYPE.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* LSL/SXTX (or no shifter) require a 64-bit offset
		 register, and base/offset element sizes must agree,
		 except for the SVE2 vector-plus-scalar form.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      /* Plain immediate offset, optionally followed by a
		 shifter when the caller allows one.  */
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      /* Post-index allows either a 64-bit register or an immediate
	 expression.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is shorthand for [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
3885
3886 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3887 on success. */
3888 static bool
3889 parse_address (char **str, aarch64_opnd_info *operand)
3890 {
3891 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3892 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3893 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3894 }
3895
3896 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3897 The arguments have the same meaning as for parse_address_main.
3898 Return TRUE on success. */
static bool
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  /* SVE forms allow Z-register bases/offsets and an immediate offset
     scaled by MUL VL.  */
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
3908
3909 /* Parse a register X0-X30. The register must be 64-bit and register 31
3910 is unallocated. */
3911 static bool
3912 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
3913 {
3914 const reg_entry *reg = parse_reg (str);
3915 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
3916 {
3917 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3918 return false;
3919 }
3920 operand->reg.regno = reg->number;
3921 operand->qualifier = AARCH64_OPND_QLF_X;
3922 return true;
3923 }
3924
3925 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3926 Return TRUE on success; otherwise return FALSE. */
3927 static bool
3928 parse_half (char **str, int *internal_fixup_p)
3929 {
3930 char *p = *str;
3931
3932 skip_past_char (&p, '#');
3933
3934 gas_assert (internal_fixup_p);
3935 *internal_fixup_p = 0;
3936
3937 if (*p == ':')
3938 {
3939 struct reloc_table_entry *entry;
3940
3941 /* Try to parse a relocation. Anything else is an error. */
3942 ++p;
3943
3944 if (!(entry = find_reloc_table_entry (&p)))
3945 {
3946 set_syntax_error (_("unknown relocation modifier"));
3947 return false;
3948 }
3949
3950 if (entry->movw_type == 0)
3951 {
3952 set_syntax_error
3953 (_("this relocation modifier is not allowed on this instruction"));
3954 return false;
3955 }
3956
3957 inst.reloc.type = entry->movw_type;
3958 }
3959 else
3960 *internal_fixup_p = 1;
3961
3962 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
3963 return false;
3964
3965 *str = p;
3966 return true;
3967 }
3968
3969 /* Parse an operand for an ADRP instruction:
3970 ADRP <Xd>, <label>
3971 Return TRUE on success; otherwise return FALSE. */
3972
3973 static bool
3974 parse_adrp (char **str)
3975 {
3976 char *p;
3977
3978 p = *str;
3979 if (*p == ':')
3980 {
3981 struct reloc_table_entry *entry;
3982
3983 /* Try to parse a relocation. Anything else is an error. */
3984 ++p;
3985 if (!(entry = find_reloc_table_entry (&p)))
3986 {
3987 set_syntax_error (_("unknown relocation modifier"));
3988 return false;
3989 }
3990
3991 if (entry->adrp_type == 0)
3992 {
3993 set_syntax_error
3994 (_("this relocation modifier is not allowed on this instruction"));
3995 return false;
3996 }
3997
3998 inst.reloc.type = entry->adrp_type;
3999 }
4000 else
4001 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4002
4003 inst.reloc.pc_rel = 1;
4004 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4005 return false;
4006 *str = p;
4007 return true;
4008 }
4009
4010 /* Miscellaneous. */
4011
4012 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4013 of SIZE tokens in which index I gives the token for field value I,
4014 or is null if field value I is invalid. REG_TYPE says which register
4015 names should be treated as registers rather than as symbolic immediates.
4016
4017 Return true on success, moving *STR past the operand and storing the
4018 field value in *VAL. */
4019
4020 static int
4021 parse_enum_string (char **str, int64_t *val, const char *const *array,
4022 size_t size, aarch64_reg_type reg_type)
4023 {
4024 expressionS exp;
4025 char *p, *q;
4026 size_t i;
4027
4028 /* Match C-like tokens. */
4029 p = q = *str;
4030 while (ISALNUM (*q))
4031 q++;
4032
4033 for (i = 0; i < size; ++i)
4034 if (array[i]
4035 && strncasecmp (array[i], p, q - p) == 0
4036 && array[i][q - p] == 0)
4037 {
4038 *val = i;
4039 *str = q;
4040 return true;
4041 }
4042
4043 if (!parse_immediate_expression (&p, &exp, reg_type))
4044 return false;
4045
4046 if (exp.X_op == O_constant
4047 && (uint64_t) exp.X_add_number < size)
4048 {
4049 *val = exp.X_add_number;
4050 *str = p;
4051 return true;
4052 }
4053
4054 /* Use the default error for this operand. */
4055 return false;
4056 }
4057
4058 /* Parse an option for a preload instruction. Returns the encoding for the
4059 option, or PARSE_FAIL. */
4060
4061 static int
4062 parse_pldop (char **str)
4063 {
4064 char *p, *q;
4065 const struct aarch64_name_value_pair *o;
4066
4067 p = q = *str;
4068 while (ISALNUM (*q))
4069 q++;
4070
4071 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4072 if (!o)
4073 return PARSE_FAIL;
4074
4075 *str = q;
4076 return o->value;
4077 }
4078
4079 /* Parse an option for a barrier instruction. Returns the encoding for the
4080 option, or PARSE_FAIL. */
4081
4082 static int
4083 parse_barrier (char **str)
4084 {
4085 char *p, *q;
4086 const struct aarch64_name_value_pair *o;
4087
4088 p = q = *str;
4089 while (ISALPHA (*q))
4090 q++;
4091
4092 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4093 if (!o)
4094 return PARSE_FAIL;
4095
4096 *str = q;
4097 return o->value;
4098 }
4099
/* Parse an operand for a PSB/TSB barrier.  Set *HINT_OPT to the hint-option
   record; return 0 if successful.  Otherwise return PARSE_FAIL.  */
4102
4103 static int
4104 parse_barrier_psb (char **str,
4105 const struct aarch64_name_value_pair ** hint_opt)
4106 {
4107 char *p, *q;
4108 const struct aarch64_name_value_pair *o;
4109
4110 p = q = *str;
4111 while (ISALPHA (*q))
4112 q++;
4113
4114 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4115 if (!o)
4116 {
4117 set_fatal_syntax_error
4118 ( _("unknown or missing option to PSB/TSB"));
4119 return PARSE_FAIL;
4120 }
4121
4122 if (o->value != 0x11)
4123 {
4124 /* PSB only accepts option name 'CSYNC'. */
4125 set_syntax_error
4126 (_("the specified option is not accepted for PSB/TSB"));
4127 return PARSE_FAIL;
4128 }
4129
4130 *str = q;
4131 *hint_opt = o;
4132 return 0;
4133 }
4134
/* Parse an operand for BTI.  Set *HINT_OPT to the hint-option record;
   return 0 if successful.  Otherwise return PARSE_FAIL.  */
4137
4138 static int
4139 parse_bti_operand (char **str,
4140 const struct aarch64_name_value_pair ** hint_opt)
4141 {
4142 char *p, *q;
4143 const struct aarch64_name_value_pair *o;
4144
4145 p = q = *str;
4146 while (ISALPHA (*q))
4147 q++;
4148
4149 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4150 if (!o)
4151 {
4152 set_fatal_syntax_error
4153 ( _("unknown option to BTI"));
4154 return PARSE_FAIL;
4155 }
4156
4157 switch (o->value)
4158 {
4159 /* Valid BTI operands. */
4160 case HINT_OPD_C:
4161 case HINT_OPD_J:
4162 case HINT_OPD_JC:
4163 break;
4164
4165 default:
4166 set_syntax_error
4167 (_("unknown option to BTI"));
4168 return PARSE_FAIL;
4169 }
4170
4171 *str = q;
4172 *hint_opt = o;
4173 return 0;
4174 }
4175
4176 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4177 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4178 on failure. Format:
4179
4180 REG_TYPE.QUALIFIER
4181
4182 Side effect: Update STR with current parse position of success.
4183 */
4184
static const reg_entry *
parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
		     aarch64_opnd_qualifier_t *qualifier)
{
  char *q;

  /* NOTE(review): parse_reg advances *str past the register name on
     success, so on the failure paths below the caller may observe a
     partially consumed *str.  */
  reg_entry *reg = parse_reg (str);
  if (reg != NULL && reg->type == reg_type)
    {
      if (!skip_past_char (str, '.'))
	{
	  set_syntax_error (_("missing ZA tile element size separator"));
	  return NULL;
	}

      /* Map the single element-size letter after '.' to the scalar
	 qualifier of the same size.  */
      q = *str;
      switch (TOLOWER (*q))
	{
	case 'b':
	  *qualifier = AARCH64_OPND_QLF_S_B;
	  break;
	case 'h':
	  *qualifier = AARCH64_OPND_QLF_S_H;
	  break;
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	case 'q':
	  *qualifier = AARCH64_OPND_QLF_S_Q;
	  break;
	default:
	  /* Unknown size letter: fail without a specific diagnostic
	     (the caller reports its own error).  */
	  return NULL;
	}
      q++;

      *str = q;
      return reg;
    }

  return NULL;
}
4229
4230 /* Parse SME ZA tile encoded in <ZAda> assembler symbol.
4231 Function return tile QUALIFIER on success.
4232
4233 Tiles are in example format: za[0-9]\.[bhsd]
4234
4235 Function returns <ZAda> register number or PARSE_FAIL.
4236 */
static int
parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
{
  int regno;
  const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZA, qualifier);

  if (reg == NULL)
    return PARSE_FAIL;
  regno = reg->number;

  /* The number of addressable tiles depends on the element size:
     1 byte tile, 2 halfword tiles, 4 word tiles, 8 doubleword tiles.  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      if (regno != 0x00)
	{
	  set_syntax_error (_("invalid ZA tile register number, expected za0"));
	  return PARSE_FAIL;
	}
      break;
    case AARCH64_OPND_QLF_S_H:
      if (regno > 0x01)
	{
	  set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
	  return PARSE_FAIL;
	}
      break;
    case AARCH64_OPND_QLF_S_S:
      if (regno > 0x03)
	{
	  /* For the 32-bit variant: is the name of the ZA tile ZA0-ZA3.  */
	  set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
	  return PARSE_FAIL;
	}
      break;
    case AARCH64_OPND_QLF_S_D:
      if (regno > 0x07)
	{
	  /* For the 64-bit variant: is the name of the ZA tile ZA0-ZA7 */
	  set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
	  return PARSE_FAIL;
	}
      break;
    default:
      /* Q tiles are not accepted by this operand.  */
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
      return PARSE_FAIL;
    }

  return regno;
}
4286
4287 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4288
4289 #<imm>
4290 <imm>
4291
4292 Function return TRUE if immediate was found, or FALSE.
4293 */
4294 static bool
4295 parse_sme_immediate (char **str, int64_t *imm)
4296 {
4297 int64_t val;
4298 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4299 return false;
4300
4301 *imm = val;
4302 return true;
4303 }
4304
4305 /* Parse index with vector select register and immediate:
4306
4307 [<Wv>, <imm>]
4308 [<Wv>, #<imm>]
4309 where <Wv> is in W12-W15 range and # is optional for immediate.
4310
4311 Function performs extra check for mandatory immediate value if REQUIRE_IMM
4312 is set to true.
4313
4314 On success function returns TRUE and populated VECTOR_SELECT_REGISTER and
4315 IMM output.
4316 */
static bool
parse_sme_za_hv_tiles_operand_index (char **str,
				     int *vector_select_register,
				     int64_t *imm)
{
  const reg_entry *reg;

  /* Syntax parsed here: "[<Wv>, <imm>]" with a mandatory comma and
     immediate; range checking of IMM is left to the caller.  */
  if (!skip_past_char (str, '['))
    {
      set_syntax_error (_("expected '['"));
      return false;
    }

  /* Vector select register W12-W15, encoded in the 2-bit Rv field.  */
  reg = parse_reg (str);
  if (reg == NULL || reg->type != REG_TYPE_R_32
      || reg->number < 12 || reg->number > 15)
    {
      set_syntax_error (_("expected vector select register W12-W15"));
      return false;
    }
  *vector_select_register = reg->number;

  if (!skip_past_char (str, ',')) /* Optional index offset immediate.  */
    {
      set_syntax_error (_("expected ','"));
      return false;
    }

  if (!parse_sme_immediate (str, imm))
    {
      set_syntax_error (_("index offset immediate expected"));
      return false;
    }

  if (!skip_past_char (str, ']'))
    {
      set_syntax_error (_("expected ']'"));
      return false;
    }

  return true;
}
4360
4361 /* Parse SME ZA horizontal or vertical vector access to tiles.
4362 Function extracts from STR to SLICE_INDICATOR <HV> horizontal (0) or
4363 vertical (1) ZA tile vector orientation. VECTOR_SELECT_REGISTER
4364 contains <Wv> select register and corresponding optional IMMEDIATE.
4365 In addition QUALIFIER is extracted.
4366
4367 Field format examples:
4368
4369 ZA0<HV>.B[<Wv>, #<imm>]
4370 <ZAn><HV>.H[<Wv>, #<imm>]
4371 <ZAn><HV>.S[<Wv>, #<imm>]
4372 <ZAn><HV>.D[<Wv>, #<imm>]
4373 <ZAn><HV>.Q[<Wv>, #<imm>]
4374
4375 Function returns <ZAda> register number or PARSE_FAIL.
4376 */
static int
parse_sme_za_hv_tiles_operand (char **str,
			       enum sme_hv_slice *slice_indicator,
			       int *vector_select_register,
			       int *imm,
			       aarch64_opnd_qualifier_t *qualifier)
{
  char *qh, *qv;
  int regno;
  int regno_limit;
  int64_t imm_limit;
  int64_t imm_value;
  const reg_entry *reg;

  /* Try the horizontal (ZAnH) register name first, then the vertical
     (ZAnV) one; only the successful parse's pointer is committed back
     to *str.  */
  qh = qv = *str;
  if ((reg = parse_reg_with_qual (&qh, REG_TYPE_ZAH, qualifier)) != NULL)
    {
      *slice_indicator = HV_horizontal;
      *str = qh;
    }
  else if ((reg = parse_reg_with_qual (&qv, REG_TYPE_ZAV, qualifier)) != NULL)
    {
      *slice_indicator = HV_vertical;
      *str = qv;
    }
  else
    return PARSE_FAIL;
  regno = reg->number;

  /* Larger element sizes allow more tiles but fewer slice index
     values; both limits are fixed by the qualifier.  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      regno_limit = 0;
      imm_limit = 15;
      break;
    case AARCH64_OPND_QLF_S_H:
      regno_limit = 1;
      imm_limit = 7;
      break;
    case AARCH64_OPND_QLF_S_S:
      regno_limit = 3;
      imm_limit = 3;
      break;
    case AARCH64_OPND_QLF_S_D:
      regno_limit = 7;
      imm_limit = 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      regno_limit = 15;
      imm_limit = 0;
      break;
    default:
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
      return PARSE_FAIL;
    }

  /* Check if destination register ZA tile vector is in range for given
     instruction variant.  */
  if (regno < 0 || regno > regno_limit)
    {
      set_syntax_error (_("ZA tile vector out of range"));
      return PARSE_FAIL;
    }

  if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
					    &imm_value))
    return PARSE_FAIL;

  /* Check if optional index offset is in the range for instruction
     variant.  */
  if (imm_value < 0 || imm_value > imm_limit)
    {
      set_syntax_error (_("index offset out of range"));
      return PARSE_FAIL;
    }

  *imm = imm_value;

  return regno;
}
4457
4458
4459 static int
4460 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4461 enum sme_hv_slice *slice_indicator,
4462 int *vector_select_register,
4463 int *imm,
4464 aarch64_opnd_qualifier_t *qualifier)
4465 {
4466 int regno;
4467
4468 if (!skip_past_char (str, '{'))
4469 {
4470 set_syntax_error (_("expected '{'"));
4471 return PARSE_FAIL;
4472 }
4473
4474 regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4475 vector_select_register, imm,
4476 qualifier);
4477
4478 if (regno == PARSE_FAIL)
4479 return PARSE_FAIL;
4480
4481 if (!skip_past_char (str, '}'))
4482 {
4483 set_syntax_error (_("expected '}'"));
4484 return PARSE_FAIL;
4485 }
4486
4487 return regno;
4488 }
4489
4490 /* Parse list of up to eight 64-bit element tile names separated by commas in
4491 SME's ZERO instruction:
4492
4493 ZERO { <mask> }
4494
4495 Function returns <mask>:
4496
4497 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4498 */
static int
parse_sme_zero_mask(char **str)
{
  char *q;
  int mask;
  aarch64_opnd_qualifier_t qualifier;

  /* Each bit N of MASK selects the 64-bit tile ZAN.D; wider tiles are
     encoded by setting every second (H) or fourth (S) bit.  */
  mask = 0x00;
  q = *str;
  do
    {
      const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA, &qualifier);
      if (reg)
	{
	  int regno = reg->number;
	  if (qualifier == AARCH64_OPND_QLF_S_B && regno == 0)
	    {
	      /* { ZA0.B } is assembled as all-ones immediate.  */
	      mask = 0xff;
	    }
	  else if (qualifier == AARCH64_OPND_QLF_S_H && regno < 2)
	    mask |= 0x55 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_S && regno < 4)
	    mask |= 0x11 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_D && regno < 8)
	    mask |= 0x01 << regno;
	  else
	    {
	      set_syntax_error (_("wrong ZA tile element format"));
	      return PARSE_FAIL;
	    }
	  continue;
	}
      else if (strncasecmp (q, "za", 2) == 0
	       && !ISALNUM (q[2]))
	{
	  /* { ZA } is assembled as all-ones immediate.  */
	  mask = 0xff;
	  q += 2;
	  continue;
	}
      else
	{
	  set_syntax_error (_("wrong ZA tile element format"));
	  return PARSE_FAIL;
	}
    }
  while (skip_past_char (&q, ','));

  *str = q;
  return mask;
}
4551
4552 /* Wraps in curly braces <mask> operand ZERO instruction:
4553
4554 ZERO { <mask> }
4555
4556 Function returns value of <mask> bit-field.
4557 */
4558 static int
4559 parse_sme_list_of_64bit_tiles (char **str)
4560 {
4561 int regno;
4562
4563 if (!skip_past_char (str, '{'))
4564 {
4565 set_syntax_error (_("expected '{'"));
4566 return PARSE_FAIL;
4567 }
4568
4569 /* Empty <mask> list is an all-zeros immediate. */
4570 if (!skip_past_char (str, '}'))
4571 {
4572 regno = parse_sme_zero_mask (str);
4573 if (regno == PARSE_FAIL)
4574 return PARSE_FAIL;
4575
4576 if (!skip_past_char (str, '}'))
4577 {
4578 set_syntax_error (_("expected '}'"));
4579 return PARSE_FAIL;
4580 }
4581 }
4582 else
4583 regno = 0x00;
4584
4585 return regno;
4586 }
4587
4588 /* Parse ZA array operand used in e.g. STR and LDR instruction.
4589 Operand format:
4590
4591 ZA[<Wv>, <imm>]
4592 ZA[<Wv>, #<imm>]
4593
4594 Function returns <Wv> or PARSE_FAIL.
4595 */
4596 static int
4597 parse_sme_za_array (char **str, int *imm)
4598 {
4599 char *p, *q;
4600 int regno;
4601 int64_t imm_value;
4602
4603 p = q = *str;
4604 while (ISALPHA (*q))
4605 q++;
4606
4607 if ((q - p != 2) || strncasecmp ("za", p, q - p) != 0)
4608 {
4609 set_syntax_error (_("expected ZA array"));
4610 return PARSE_FAIL;
4611 }
4612
4613 if (! parse_sme_za_hv_tiles_operand_index (&q, &regno, &imm_value))
4614 return PARSE_FAIL;
4615
4616 if (imm_value < 0 || imm_value > 15)
4617 {
4618 set_syntax_error (_("offset out of range"));
4619 return PARSE_FAIL;
4620 }
4621
4622 *imm = imm_value;
4623 *str = q;
4624 return regno;
4625 }
4626
4627 /* Parse streaming mode operand for SMSTART and SMSTOP.
4628
4629 {SM | ZA}
4630
   Function returns 's' if SM or 'z' if ZA is parsed.  Otherwise PARSE_FAIL.
4632 */
4633 static int
4634 parse_sme_sm_za (char **str)
4635 {
4636 char *p, *q;
4637
4638 p = q = *str;
4639 while (ISALPHA (*q))
4640 q++;
4641
4642 if ((q - p != 2)
4643 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4644 {
4645 set_syntax_error (_("expected SM or ZA operand"));
4646 return PARSE_FAIL;
4647 }
4648
4649 *str = q;
4650 return TOLOWER (p[0]);
4651 }
4652
4653 /* Parse the name of the source scalable predicate register, the index base
4654 register W12-W15 and the element index. Function performs element index
4655 limit checks as well as qualifier type checks.
4656
4657 <Pn>.<T>[<Wv>, <imm>]
4658 <Pn>.<T>[<Wv>, #<imm>]
4659
4660 On success function sets <Wv> to INDEX_BASE_REG, <T> to QUALIFIER and
4661 <imm> to IMM.
4662 Function returns <Pn>, or PARSE_FAIL.
4663 */
4664 static int
4665 parse_sme_pred_reg_with_index(char **str,
4666 int *index_base_reg,
4667 int *imm,
4668 aarch64_opnd_qualifier_t *qualifier)
4669 {
4670 int regno;
4671 int64_t imm_limit;
4672 int64_t imm_value;
4673 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_PN, qualifier);
4674
4675 if (reg == NULL)
4676 return PARSE_FAIL;
4677 regno = reg->number;
4678
4679 switch (*qualifier)
4680 {
4681 case AARCH64_OPND_QLF_S_B:
4682 imm_limit = 15;
4683 break;
4684 case AARCH64_OPND_QLF_S_H:
4685 imm_limit = 7;
4686 break;
4687 case AARCH64_OPND_QLF_S_S:
4688 imm_limit = 3;
4689 break;
4690 case AARCH64_OPND_QLF_S_D:
4691 imm_limit = 1;
4692 break;
4693 default:
4694 set_syntax_error (_("wrong predicate register element size, allowed b, h, s and d"));
4695 return PARSE_FAIL;
4696 }
4697
4698 if (! parse_sme_za_hv_tiles_operand_index (str, index_base_reg, &imm_value))
4699 return PARSE_FAIL;
4700
4701 if (imm_value < 0 || imm_value > imm_limit)
4702 {
4703 set_syntax_error (_("element index out of range for given variant"));
4704 return PARSE_FAIL;
4705 }
4706
4707 *imm = imm_value;
4708
4709 return regno;
4710 }
4711
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-NULL, *FLAGS receives the flags of the matched table
   entry (or 0 for an implementation-defined register).  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy the register name into BUF, lower-cased, stopping at the first
     character that cannot be part of a register name; excess characters
     are scanned past but not stored.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Range-check each field of the encoding.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack op0:op1:CRn:CRm:op2 into the 16-bit register encoding.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known register: diagnose (but still accept) names the selected
	 processor does not support, and warn for deprecated names.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4785
/* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
   for the option, or NULL.  */

static const aarch64_sys_ins_reg *
parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_ins_reg *o;

  /* Copy the name into BUF, lower-cased, stopping at the first character
     that cannot be part of a register name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return NULL;

  o = str_hash_find (sys_ins_regs, buf);
  if (!o)
    return NULL;

  /* Diagnose unsupported or deprecated names, but still return the entry
     so that assembly can continue.  */
  if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
					o->name, o->value, o->flags, 0))
    as_bad (_("selected processor does not support system register "
	      "name '%s'"), buf);
  if (aarch64_sys_reg_deprecated_p (o->flags))
    as_warn (_("system register name '%s' is deprecated and may be "
	       "removed in a future release"), buf);

  *str = q;
  return o;
}
4823 \f
/* Helper macros for parse_operands: each one parses a single syntactic
   element from the local variable STR and jumps to the local label
   "failure" on error.  They rely on locals (str, val, rtype, reg,
   qualifier, info, imm_reg_type) declared in the enclosing function.  */

/* Require the single character CHR to be next in the input.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into VAL (and its type into RTYPE).  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE and record its
   number and qualifier in INFO.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL without range checking.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse one of the strings in ARRAY, storing its index in VAL.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; fail the parse if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4875 \f
/* Encode the 12-bit immediate field of an add/sub immediate instruction:
   the value occupies bits [21:10] of the instruction word.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  const unsigned int field_lsb = 10;
  return imm << field_lsb;
}
4882
/* Encode the shift amount field of an add/sub immediate instruction:
   the count is placed at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  const unsigned int field_lsb = 22;
  return cnt << field_lsb;
}
4889
4890
/* Encode the immediate field of an ADR instruction: the low two bits of
   IMM go to immlo (bits [30:29]) and bits [20:2] go to immhi
   (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;		/* [1:0]  -> [30:29] */
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;	/* [20:2] -> [23:5]  */
  return immlo | immhi;
}
4898
/* Encode the 16-bit immediate field of a move wide immediate
   instruction; the value starts at bit 5.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  const unsigned int field_lsb = 5;
  return imm << field_lsb;
}
4905
/* Encode the 26-bit offset of an unconditional branch; the offset is
   masked to 26 bits and occupies bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t mask = 0x03ffffff;	/* (1 << 26) - 1.  */
  return ofs & mask;
}
4912
/* Encode the 19-bit offset of a conditional branch or compare & branch;
   the offset is masked to 19 bits and placed at bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t mask = 0x7ffff;	/* (1 << 19) - 1.  */
  return (ofs & mask) << 5;
}
4919
/* Encode the 19-bit offset of a load literal; the offset is masked to
   19 bits and placed at bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t mask = 0x7ffff;	/* (1 << 19) - 1.  */
  return (ofs & mask) << 5;
}
4926
/* Encode the 14-bit offset of a test & branch instruction; the offset
   is masked to 14 bits and placed at bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t mask = 0x3fff;		/* (1 << 14) - 1.  */
  return (ofs & mask) << 5;
}
4933
/* Encode the 16-bit immediate field of svc/hvc/smc; the value starts
   at bit 5.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  const unsigned int field_lsb = 5;
  return imm << field_lsb;
}
4940
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling bit 30 of
   the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 0x40000000;	/* 1 << 30.  */
  return opcode ^ op_bit;
}
4947
/* Turn a MOVN/MOVZ-class opcode into MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t op_bit = 0x40000000;	/* 1 << 30.  */
  return opcode | op_bit;
}
4953
/* Turn a MOVN/MOVZ-class opcode into MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t op_bit = 0x40000000;	/* 1 << 30.  */
  return opcode & ~op_bit;
}
4959
4960 /* Overall per-instruction processing. */
4961
4962 /* We need to be able to fix up arbitrary expressions in some statements.
4963 This is so that we can handle symbols that are an arbitrary distance from
4964 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4965 which returns part of an address in a form which will be valid for
4966 a data instruction. We do this by pushing the expression into a symbol
4967 in the expr_section, and creating a fix for that. */
4968
4969 static fixS *
4970 fix_new_aarch64 (fragS * frag,
4971 int where,
4972 short int size,
4973 expressionS * exp,
4974 int pc_rel,
4975 int reloc)
4976 {
4977 fixS *new_fix;
4978
4979 switch (exp->X_op)
4980 {
4981 case O_constant:
4982 case O_symbol:
4983 case O_add:
4984 case O_subtract:
4985 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4986 break;
4987
4988 default:
4989 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4990 pc_rel, reloc);
4991 break;
4992 }
4993 return new_fix;
4994 }
4995 \f
/* Diagnostics on operands errors.  */

/* Non-zero if verbose (multi-line, with suggested corrections) operand
   error messages should be emitted; this is the default and can be
   disabled with -mno-verbose-error.  */
static int verbose_error_p = 1;
5001
5002 #ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The entries must be
   kept in the same order as the aarch64_operand_error_kind enum so that
   an error kind can be used directly as an index into this array.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
5018 #endif /* DEBUG_AARCH64 */
5019
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enum values being declared in order
     of increasing severity; these asserts document and check that
     assumption.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
5043
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used elsewhere.

   N.B. the result points to a static buffer and is only valid until the
   next call.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5072
5073 static void
5074 reset_aarch64_instruction (aarch64_instruction *instruction)
5075 {
5076 memset (instruction, '\0', sizeof (aarch64_instruction));
5077 instruction->reloc.type = BFD_RELOC_UNUSED;
5078 }
5079
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded error: the opcode template it was found for, the error
   detail, and the link to the next record.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Template the error belongs to.  */
  aarch64_operand_error detail;		/* Kind, operand index and message.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of error records with head and tail pointers.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines to avoid repeated
   allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5111
5112 /* Initialize the data structure that stores the operand mismatch
5113 information on assembling one line of the assembly code. */
5114 static void
5115 init_operand_error_report (void)
5116 {
5117 if (operand_error_report.head != NULL)
5118 {
5119 gas_assert (operand_error_report.tail != NULL);
5120 operand_error_report.tail->next = free_opnd_error_record_nodes;
5121 free_opnd_error_record_nodes = operand_error_report.head;
5122 operand_error_report.head = NULL;
5123 operand_error_report.tail = NULL;
5124 return;
5125 }
5126 gas_assert (operand_error_report.tail == NULL);
5127 }
5128
5129 /* Return TRUE if some operand error has been recorded during the
5130 parsing of the current assembly line using the opcode *OPCODE;
5131 otherwise return FALSE. */
5132 static inline bool
5133 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5134 {
5135 operand_error_record *record = operand_error_report.head;
5136 return record && record->opcode == opcode;
5137 }
5138
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* RECORD starts as the head of the list; if a record for OPCODE already
     exists it is that head (records are always inserted at the head).  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Recycle a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Overwrite (or initialize) the detail of the record for OPCODE.  */
  record->detail = new_record->detail;
}
5190
5191 static inline void
5192 record_operand_error_info (const aarch64_opcode *opcode,
5193 aarch64_operand_error *error_info)
5194 {
5195 operand_error_record record;
5196 record.opcode = opcode;
5197 record.detail = *error_info;
5198 add_operand_error_record (&record);
5199 }
5200
5201 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5202 error message *ERROR, for operand IDX (count from 0). */
5203
5204 static void
5205 record_operand_error (const aarch64_opcode *opcode, int idx,
5206 enum aarch64_operand_error_kind kind,
5207 const char* error)
5208 {
5209 aarch64_operand_error info;
5210 memset(&info, 0, sizeof (info));
5211 info.index = idx;
5212 info.kind = kind;
5213 info.error = error;
5214 info.non_fatal = false;
5215 record_operand_error_info (opcode, &info);
5216 }
5217
5218 static void
5219 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5220 enum aarch64_operand_error_kind kind,
5221 const char* error, const int *extra_data)
5222 {
5223 aarch64_operand_error info;
5224 info.index = idx;
5225 info.kind = kind;
5226 info.error = error;
5227 info.data[0].i = extra_data[0];
5228 info.data[1].i = extra_data[1];
5229 info.data[2].i = extra_data[2];
5230 info.non_fatal = false;
5231 record_operand_error_info (opcode, &info);
5232 }
5233
5234 static void
5235 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5236 const char* error, int lower_bound,
5237 int upper_bound)
5238 {
5239 int data[3] = {lower_bound, upper_bound, 0};
5240 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5241 error, data);
5242 }
5243
5244 /* Remove the operand error record for *OPCODE. */
5245 static void ATTRIBUTE_UNUSED
5246 remove_operand_error_record (const aarch64_opcode *opcode)
5247 {
5248 if (opcode_has_operand_error_p (opcode))
5249 {
5250 operand_error_record* record = operand_error_report.head;
5251 gas_assert (record != NULL && operand_error_report.tail != NULL);
5252 operand_error_report.head = record->next;
5253 record->next = free_opnd_error_record_nodes;
5254 free_opnd_error_record_nodes = record;
5255 if (operand_error_report.head == NULL)
5256 {
5257 gas_assert (operand_error_report.tail == record);
5258 operand_error_report.tail = NULL;
5259 }
5260 }
5261 }
5262
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.
   N.B. if the first sequence in the list is empty, the loop below exits
   immediately and 0 (not -1) is returned.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes have much fewer patterns in the list; an empty
	 sequence marks the end.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers of this sequence agree with
	 the instruction's operands.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Keep the first sequence with the highest match count.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5312
5313 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5314 corresponding operands in *INSTR. */
5315
5316 static inline void
5317 assign_qualifier_sequence (aarch64_inst *instr,
5318 const aarch64_opnd_qualifier_t *qualifiers)
5319 {
5320 int i = 0;
5321 int num_opnds = aarch64_num_of_operands (instr->opcode);
5322 gas_assert (num_opnds);
5323 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5324 instr->operands[i].qualifier = *qualifiers;
5325 }
5326
/* Callback used by aarch64_print_operand to apply STYLE to the
   disassembler output created from FMT and ARGS.  The STYLER object holds
   any required state.  Must return a pointer to a string (created from FMT
   and ARGS) that will continue to be valid until the complete disassembled
   instruction has been printed.

   We don't currently add any styling to the output of the disassembler as
   used within assembler error messages, and so STYLE is ignored here.  A
   new string is allocated on the obstack help within STYLER and returned
   to the caller.  */

static const char *aarch64_apply_style
	(struct aarch64_styler *styler,
	 enum disassembler_style style ATTRIBUTE_UNUSED,
	 const char *fmt, va_list args)
{
  int res;
  char *ptr;
  struct obstack *stack = (struct obstack *) styler->state;
  va_list ap;

  /* Calculate the required space.  A va_copy is needed because formatting
     consumes the va_list, and ARGS must be formatted again below.  */
  va_copy (ap, args);
  res = vsnprintf (NULL, 0, fmt, ap);
  va_end (ap);
  gas_assert (res >= 0);

  /* Allocate space on the obstack and format the result.  */
  ptr = (char *) obstack_alloc (stack, res + 1);
  res = vsnprintf (ptr, (res + 1), fmt, args);
  gas_assert (res >= 0);

  return ptr;
}
5361
/* Print operands for the diagnosis purpose.  The operand strings are
   appended to BUF, which is assumed large enough (callers in this file
   pass a 2048-byte buffer).  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Styling callbacks allocate their strings on this obstack; it is
     freed wholesale at the end of the function.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  obstack_free (&content, NULL);
}
5413
/* Send to stderr a string as information, prefixed with the current
   file name and line number (when known) and "Info: ".  */

static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file = as_where (&line);
  va_list args;

  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
5437
/* Output one operand error record.  Non-fatal errors are reported via
   as_warn, everything else via as_bad.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX may be -1 (operand unknown); use AARCH64_OPND_NIL in that case.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  /* N.B. the global instruction buffer INST is reused here; the
	     error report is reset below to avoid side effects.  */
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail here -- this is the erroneous
	     instruction being re-parsed for diagnosis.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes have much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the permitted lower/upper bounds.  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected number of registers.  */
      if (detail->data[0].i == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5633
5634 /* Process and output the error message about the operand mismatching.
5635
5636 When this function is called, the operand error information had
5637 been collected for an assembly line and there will be multiple
5638 errors in the case of multiple instruction templates; output the
5639 error message that most closely describes the problem.
5640
5641 The errors to be printed can be filtered on printing all errors
5642 or only non-fatal errors. This distinction has to be made because
5643 the error buffer may already be filled with fatal errors we don't want to
5644 print due to the different instruction templates. */
5645
static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      /* Records filtered out by NON_FATAL_ONLY must not influence which
	 severity class gets reported.  */
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5731 \f
/* Store the 32-bit encoding INSN into BUF, always in little-endian
   byte order regardless of the host's endianness.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
5742
/* Read a 32-bit little-endian instruction encoding back out of BUF.
   Inverse of put_aarch64_insn.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Fold the bytes in from the most significant end.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
5752
/* Emit the already-encoded instruction in inst.base.value into the
   current frag and record debug/relocation bookkeeping.  If a
   relocation was prepared in inst.reloc, create the corresponding
   fix-up; NEW_INST, when non-NULL, is attached to that fix-up so the
   instruction details are available when the fix is applied.  */
static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room for one instruction in the current frag.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fix-ups carry the operand identity and flags so
	     they can be resolved by this back end itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
5786
5787 /* Link together opcodes of the same name. */
5788
struct templates
{
  const aarch64_opcode *opcode;	/* One opcode entry for this mnemonic.  */
  struct templates *next;	/* Next entry sharing the same name.  */
};

typedef struct templates templates;
5796
5797 static templates *
5798 lookup_mnemonic (const char *start, int len)
5799 {
5800 templates *templ = NULL;
5801
5802 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5803 return templ;
5804 }
5805
5806 /* Subroutine of md_assemble, responsible for looking up the primary
5807 opcode from the mnemonic the user wrote. BASE points to the beginning
5808 of the mnemonic, DOT points to the first '.' within the mnemonic
5809 (if any) and END points to the end of the mnemonic. */
5810
5811 static templates *
5812 opcode_lookup (char *base, char *dot, char *end)
5813 {
5814 const aarch64_cond *cond;
5815 char condname[16];
5816 int len;
5817
5818 if (dot == end)
5819 return 0;
5820
5821 inst.cond = COND_ALWAYS;
5822
5823 /* Handle a possible condition. */
5824 if (dot)
5825 {
5826 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5827 if (!cond)
5828 return 0;
5829 inst.cond = cond->value;
5830 len = dot - base;
5831 }
5832 else
5833 len = end - base;
5834
5835 if (inst.cond == COND_ALWAYS)
5836 {
5837 /* Look for unaffixed mnemonic. */
5838 return lookup_mnemonic (base, len);
5839 }
5840 else if (len <= 13)
5841 {
5842 /* append ".c" to mnemonic if conditional */
5843 memcpy (condname, base, len);
5844 memcpy (condname + len, ".c", 2);
5845 base = condname;
5846 len += 2;
5847 return lookup_mnemonic (base, len);
5848 }
5849
5850 return NULL;
5851 }
5852
5853 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5854 to a corresponding operand qualifier. */
5855
static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the width-dependent offset
     below is added to one of these.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate suffixes /z and /m map directly.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  Relies on the S_B..S_Q qualifiers
	 being laid out in the same order as vector_el_type.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit vector arrangements are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5927
5928 /* Process an optional operand that is found omitted from the assembly line.
5929 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5930 instruction's opcode entry while IDX is the index of this omitted operand.
5931 */
5932
static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  /* Only an optional operand that was actually omitted may be filled
     in here.  */
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default value is a register
       number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector element (register + lane) operands: the default is the
       register number of the reglane.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default value is the immediate itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      /* An omitted scaled pattern defaults to "<pattern>, MUL #1".  */
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      /* No relocation for an omitted exception immediate.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* The default value indexes the barrier option table.  */
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      /* The default value indexes the hint option table.  */
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6026
6027 /* Process the relocation type for move wide instructions.
6028 Return TRUE on success; otherwise return FALSE. */
6029
static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* Non-zero if operand 0 is a 32-bit (W) register.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* Reject relocation types that are not allowed on MOVK: the signed,
     PC-relative and certain TLS group relocations listed below.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Derive the implicit LSL amount from the relocation's group number:
     G0 selects bits [15:0], G1 [31:16], G2 [47:32], G3 [63:48].  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* G2/G3 address bits don't exist in a 32-bit register.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6128
/* A primitive log calculator: return log2 of SIZE, where SIZE is a
   power of two between 1 and 16 inclusive.  Any other value asserts
   and yields -1 (as an unsigned int).  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* log2 values indexed by SIZE - 1; -1 marks non-power-of-two sizes.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Also guard against SIZE == 0: the previous check only rejected
     values above 16, so ls[size - 1] would read out of bounds.  */
  if (size < 1 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
6144
6145 /* Determine and return the real reloc type code for an instruction
6146 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
6147
static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows correspond to the pseudo relocation types (in the same order
     as their BFD_RELOC_AARCH64_* codes, see the comment at the end);
     columns are indexed by log2 of the transfer size in bytes
     (8/16/32/64/128-bit accesses).  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  /* This function must only be called for the pseudo reloc types that
     the table above can resolve.  */
  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand's qualifier is still unknown, infer it from
     the qualifier of operand 0.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS variants have no 128-bit column (BFD_RELOC_AARCH64_NONE
     placeholders above), so cap the allowed size at 64 bits.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6232
6233 /* Check whether a register list REGINFO is valid. The registers must be
6234 numbered in increasing order (modulo 32), in increments of one or two.
6235
6236 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
6237 increments of two.
6238
6239 Return FALSE if such a register list is invalid, otherwise return TRUE. */
6240
static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t i, nb_regs, step, expected, bits;

  /* Bits [1:0] of REGINFO encode the number of registers minus one;
     each following 5-bit field holds a register number.  */
  nb_regs = (reginfo & 0x3) + 1;
  bits = reginfo >> 2;
  step = accept_alternate ? 2 : 1;
  expected = bits & 0x1f;

  for (i = 0; i < nb_regs; ++i)
    {
      uint32_t regno = bits & 0x1f;
      if (regno != expected)
	return false;
      /* Register numbers wrap around modulo 32.  */
      expected = (regno + step) & 0x1f;
      bits >>= 5;
    }

  return true;
}
6263
6264 /* Generic instruction operand parser. This does no encoding and no
6265 semantic validation; it merely squirrels values away in the inst
6266 structure. Returns TRUE or FALSE depending on whether the
6267 specified grammar matched. */
6268
6269 static bool
6270 parse_operands (char *str, const aarch64_opcode *opcode)
6271 {
6272 int i;
6273 char *backtrack_pos = 0;
6274 const enum aarch64_opnd *operands = opcode->operands;
6275 aarch64_reg_type imm_reg_type;
6276
6277 clear_error ();
6278 skip_whitespace (str);
6279
6280 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
6281 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6282 else
6283 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6284
6285 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6286 {
6287 int64_t val;
6288 const reg_entry *reg;
6289 int comma_skipped_p = 0;
6290 aarch64_reg_type rtype;
6291 struct vector_type_el vectype;
6292 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6293 aarch64_opnd_info *info = &inst.base.operands[i];
6294 aarch64_reg_type reg_type;
6295
6296 DEBUG_TRACE ("parse operand %d", i);
6297
6298 /* Assign the operand code. */
6299 info->type = operands[i];
6300
6301 if (optional_operand_p (opcode, i))
6302 {
6303 /* Remember where we are in case we need to backtrack. */
6304 gas_assert (!backtrack_pos);
6305 backtrack_pos = str;
6306 }
6307
6308 /* Expect comma between operands; the backtrack mechanism will take
6309 care of cases of omitted optional operand. */
6310 if (i > 0 && ! skip_past_char (&str, ','))
6311 {
6312 set_syntax_error (_("comma expected between operands"));
6313 goto failure;
6314 }
6315 else
6316 comma_skipped_p = 1;
6317
6318 switch (operands[i])
6319 {
6320 case AARCH64_OPND_Rd:
6321 case AARCH64_OPND_Rn:
6322 case AARCH64_OPND_Rm:
6323 case AARCH64_OPND_Rt:
6324 case AARCH64_OPND_Rt2:
6325 case AARCH64_OPND_Rs:
6326 case AARCH64_OPND_Ra:
6327 case AARCH64_OPND_Rt_LS64:
6328 case AARCH64_OPND_Rt_SYS:
6329 case AARCH64_OPND_PAIRREG:
6330 case AARCH64_OPND_SVE_Rm:
6331 po_int_reg_or_fail (REG_TYPE_R_Z);
6332
6333 /* In LS64 load/store instructions Rt register number must be even
6334 and <=22. */
6335 if (operands[i] == AARCH64_OPND_Rt_LS64)
6336 {
6337 /* We've already checked if this is valid register.
6338 This will check if register number (Rt) is not undefined for LS64
6339 instructions:
6340 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6341 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6342 {
6343 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6344 goto failure;
6345 }
6346 }
6347 break;
6348
6349 case AARCH64_OPND_Rd_SP:
6350 case AARCH64_OPND_Rn_SP:
6351 case AARCH64_OPND_Rt_SP:
6352 case AARCH64_OPND_SVE_Rn_SP:
6353 case AARCH64_OPND_Rm_SP:
6354 po_int_reg_or_fail (REG_TYPE_R_SP);
6355 break;
6356
6357 case AARCH64_OPND_Rm_EXT:
6358 case AARCH64_OPND_Rm_SFT:
6359 po_misc_or_fail (parse_shifter_operand
6360 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6361 ? SHIFTED_ARITH_IMM
6362 : SHIFTED_LOGIC_IMM)));
6363 if (!info->shifter.operator_present)
6364 {
6365 /* Default to LSL if not present. Libopcodes prefers shifter
6366 kind to be explicit. */
6367 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6368 info->shifter.kind = AARCH64_MOD_LSL;
6369 /* For Rm_EXT, libopcodes will carry out further check on whether
6370 or not stack pointer is used in the instruction (Recall that
6371 "the extend operator is not optional unless at least one of
6372 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6373 }
6374 break;
6375
6376 case AARCH64_OPND_Fd:
6377 case AARCH64_OPND_Fn:
6378 case AARCH64_OPND_Fm:
6379 case AARCH64_OPND_Fa:
6380 case AARCH64_OPND_Ft:
6381 case AARCH64_OPND_Ft2:
6382 case AARCH64_OPND_Sd:
6383 case AARCH64_OPND_Sn:
6384 case AARCH64_OPND_Sm:
6385 case AARCH64_OPND_SVE_VZn:
6386 case AARCH64_OPND_SVE_Vd:
6387 case AARCH64_OPND_SVE_Vm:
6388 case AARCH64_OPND_SVE_Vn:
6389 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
6390 if (val == PARSE_FAIL)
6391 {
6392 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6393 goto failure;
6394 }
6395 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
6396
6397 info->reg.regno = val;
6398 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
6399 break;
6400
6401 case AARCH64_OPND_SVE_Pd:
6402 case AARCH64_OPND_SVE_Pg3:
6403 case AARCH64_OPND_SVE_Pg4_5:
6404 case AARCH64_OPND_SVE_Pg4_10:
6405 case AARCH64_OPND_SVE_Pg4_16:
6406 case AARCH64_OPND_SVE_Pm:
6407 case AARCH64_OPND_SVE_Pn:
6408 case AARCH64_OPND_SVE_Pt:
6409 case AARCH64_OPND_SME_Pm:
6410 reg_type = REG_TYPE_PN;
6411 goto vector_reg;
6412
6413 case AARCH64_OPND_SVE_Za_5:
6414 case AARCH64_OPND_SVE_Za_16:
6415 case AARCH64_OPND_SVE_Zd:
6416 case AARCH64_OPND_SVE_Zm_5:
6417 case AARCH64_OPND_SVE_Zm_16:
6418 case AARCH64_OPND_SVE_Zn:
6419 case AARCH64_OPND_SVE_Zt:
6420 reg_type = REG_TYPE_ZN;
6421 goto vector_reg;
6422
6423 case AARCH64_OPND_Va:
6424 case AARCH64_OPND_Vd:
6425 case AARCH64_OPND_Vn:
6426 case AARCH64_OPND_Vm:
6427 reg_type = REG_TYPE_VN;
6428 vector_reg:
6429 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6430 if (val == PARSE_FAIL)
6431 {
6432 first_error (_(get_reg_expected_msg (reg_type)));
6433 goto failure;
6434 }
6435 if (vectype.defined & NTA_HASINDEX)
6436 goto failure;
6437
6438 info->reg.regno = val;
6439 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6440 && vectype.type == NT_invtype)
6441 /* Unqualified Pn and Zn registers are allowed in certain
6442 contexts. Rely on F_STRICT qualifier checking to catch
6443 invalid uses. */
6444 info->qualifier = AARCH64_OPND_QLF_NIL;
6445 else
6446 {
6447 info->qualifier = vectype_to_qualifier (&vectype);
6448 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6449 goto failure;
6450 }
6451 break;
6452
6453 case AARCH64_OPND_VdD1:
6454 case AARCH64_OPND_VnD1:
6455 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
6456 if (val == PARSE_FAIL)
6457 {
6458 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6459 goto failure;
6460 }
6461 if (vectype.type != NT_d || vectype.index != 1)
6462 {
6463 set_fatal_syntax_error
6464 (_("the top half of a 128-bit FP/SIMD register is expected"));
6465 goto failure;
6466 }
6467 info->reg.regno = val;
6468 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6469 here; it is correct for the purpose of encoding/decoding since
6470 only the register number is explicitly encoded in the related
6471 instructions, although this appears a bit hacky. */
6472 info->qualifier = AARCH64_OPND_QLF_S_D;
6473 break;
6474
6475 case AARCH64_OPND_SVE_Zm3_INDEX:
6476 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6477 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6478 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6479 case AARCH64_OPND_SVE_Zm4_INDEX:
6480 case AARCH64_OPND_SVE_Zn_INDEX:
6481 reg_type = REG_TYPE_ZN;
6482 goto vector_reg_index;
6483
6484 case AARCH64_OPND_Ed:
6485 case AARCH64_OPND_En:
6486 case AARCH64_OPND_Em:
6487 case AARCH64_OPND_Em16:
6488 case AARCH64_OPND_SM3_IMM2:
6489 reg_type = REG_TYPE_VN;
6490 vector_reg_index:
6491 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6492 if (val == PARSE_FAIL)
6493 {
6494 first_error (_(get_reg_expected_msg (reg_type)));
6495 goto failure;
6496 }
6497 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6498 goto failure;
6499
6500 info->reglane.regno = val;
6501 info->reglane.index = vectype.index;
6502 info->qualifier = vectype_to_qualifier (&vectype);
6503 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6504 goto failure;
6505 break;
6506
6507 case AARCH64_OPND_SVE_ZnxN:
6508 case AARCH64_OPND_SVE_ZtxN:
6509 reg_type = REG_TYPE_ZN;
6510 goto vector_reg_list;
6511
6512 case AARCH64_OPND_LVn:
6513 case AARCH64_OPND_LVt:
6514 case AARCH64_OPND_LVt_AL:
6515 case AARCH64_OPND_LEt:
6516 reg_type = REG_TYPE_VN;
6517 vector_reg_list:
6518 if (reg_type == REG_TYPE_ZN
6519 && get_opcode_dependent_value (opcode) == 1
6520 && *str != '{')
6521 {
6522 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6523 if (val == PARSE_FAIL)
6524 {
6525 first_error (_(get_reg_expected_msg (reg_type)));
6526 goto failure;
6527 }
6528 info->reglist.first_regno = val;
6529 info->reglist.num_regs = 1;
6530 }
6531 else
6532 {
6533 val = parse_vector_reg_list (&str, reg_type, &vectype);
6534 if (val == PARSE_FAIL)
6535 goto failure;
6536
6537 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6538 {
6539 set_fatal_syntax_error (_("invalid register list"));
6540 goto failure;
6541 }
6542
6543 if (vectype.width != 0 && *str != ',')
6544 {
6545 set_fatal_syntax_error
6546 (_("expected element type rather than vector type"));
6547 goto failure;
6548 }
6549
6550 info->reglist.first_regno = (val >> 2) & 0x1f;
6551 info->reglist.num_regs = (val & 0x3) + 1;
6552 }
6553 if (operands[i] == AARCH64_OPND_LEt)
6554 {
6555 if (!(vectype.defined & NTA_HASINDEX))
6556 goto failure;
6557 info->reglist.has_index = 1;
6558 info->reglist.index = vectype.index;
6559 }
6560 else
6561 {
6562 if (vectype.defined & NTA_HASINDEX)
6563 goto failure;
6564 if (!(vectype.defined & NTA_HASTYPE))
6565 {
6566 if (reg_type == REG_TYPE_ZN)
6567 set_fatal_syntax_error (_("missing type suffix"));
6568 goto failure;
6569 }
6570 }
6571 info->qualifier = vectype_to_qualifier (&vectype);
6572 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6573 goto failure;
6574 break;
6575
6576 case AARCH64_OPND_CRn:
6577 case AARCH64_OPND_CRm:
6578 {
6579 char prefix = *(str++);
6580 if (prefix != 'c' && prefix != 'C')
6581 goto failure;
6582
6583 po_imm_nc_or_fail ();
6584 if (val > 15)
6585 {
6586 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6587 goto failure;
6588 }
6589 info->qualifier = AARCH64_OPND_QLF_CR;
6590 info->imm.value = val;
6591 break;
6592 }
6593
6594 case AARCH64_OPND_SHLL_IMM:
6595 case AARCH64_OPND_IMM_VLSR:
6596 po_imm_or_fail (1, 64);
6597 info->imm.value = val;
6598 break;
6599
6600 case AARCH64_OPND_CCMP_IMM:
6601 case AARCH64_OPND_SIMM5:
6602 case AARCH64_OPND_FBITS:
6603 case AARCH64_OPND_TME_UIMM16:
6604 case AARCH64_OPND_UIMM4:
6605 case AARCH64_OPND_UIMM4_ADDG:
6606 case AARCH64_OPND_UIMM10:
6607 case AARCH64_OPND_UIMM3_OP1:
6608 case AARCH64_OPND_UIMM3_OP2:
6609 case AARCH64_OPND_IMM_VLSL:
6610 case AARCH64_OPND_IMM:
6611 case AARCH64_OPND_IMM_2:
6612 case AARCH64_OPND_WIDTH:
6613 case AARCH64_OPND_SVE_INV_LIMM:
6614 case AARCH64_OPND_SVE_LIMM:
6615 case AARCH64_OPND_SVE_LIMM_MOV:
6616 case AARCH64_OPND_SVE_SHLIMM_PRED:
6617 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6618 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6619 case AARCH64_OPND_SVE_SHRIMM_PRED:
6620 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6621 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6622 case AARCH64_OPND_SVE_SIMM5:
6623 case AARCH64_OPND_SVE_SIMM5B:
6624 case AARCH64_OPND_SVE_SIMM6:
6625 case AARCH64_OPND_SVE_SIMM8:
6626 case AARCH64_OPND_SVE_UIMM3:
6627 case AARCH64_OPND_SVE_UIMM7:
6628 case AARCH64_OPND_SVE_UIMM8:
6629 case AARCH64_OPND_SVE_UIMM8_53:
6630 case AARCH64_OPND_IMM_ROT1:
6631 case AARCH64_OPND_IMM_ROT2:
6632 case AARCH64_OPND_IMM_ROT3:
6633 case AARCH64_OPND_SVE_IMM_ROT1:
6634 case AARCH64_OPND_SVE_IMM_ROT2:
6635 case AARCH64_OPND_SVE_IMM_ROT3:
6636 po_imm_nc_or_fail ();
6637 info->imm.value = val;
6638 break;
6639
6640 case AARCH64_OPND_SVE_AIMM:
6641 case AARCH64_OPND_SVE_ASIMM:
6642 po_imm_nc_or_fail ();
6643 info->imm.value = val;
6644 skip_whitespace (str);
6645 if (skip_past_comma (&str))
6646 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6647 else
6648 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6649 break;
6650
6651 case AARCH64_OPND_SVE_PATTERN:
6652 po_enum_or_fail (aarch64_sve_pattern_array);
6653 info->imm.value = val;
6654 break;
6655
6656 case AARCH64_OPND_SVE_PATTERN_SCALED:
6657 po_enum_or_fail (aarch64_sve_pattern_array);
6658 info->imm.value = val;
6659 if (skip_past_comma (&str)
6660 && !parse_shift (&str, info, SHIFTED_MUL))
6661 goto failure;
6662 if (!info->shifter.operator_present)
6663 {
6664 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6665 info->shifter.kind = AARCH64_MOD_MUL;
6666 info->shifter.amount = 1;
6667 }
6668 break;
6669
6670 case AARCH64_OPND_SVE_PRFOP:
6671 po_enum_or_fail (aarch64_sve_prfop_array);
6672 info->imm.value = val;
6673 break;
6674
6675 case AARCH64_OPND_UIMM7:
6676 po_imm_or_fail (0, 127);
6677 info->imm.value = val;
6678 break;
6679
6680 case AARCH64_OPND_IDX:
6681 case AARCH64_OPND_MASK:
6682 case AARCH64_OPND_BIT_NUM:
6683 case AARCH64_OPND_IMMR:
6684 case AARCH64_OPND_IMMS:
6685 po_imm_or_fail (0, 63);
6686 info->imm.value = val;
6687 break;
6688
6689 case AARCH64_OPND_IMM0:
6690 po_imm_nc_or_fail ();
6691 if (val != 0)
6692 {
6693 set_fatal_syntax_error (_("immediate zero expected"));
6694 goto failure;
6695 }
6696 info->imm.value = 0;
6697 break;
6698
6699 case AARCH64_OPND_FPIMM0:
6700 {
6701 int qfloat;
6702 bool res1 = false, res2 = false;
6703 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6704 it is probably not worth the effort to support it. */
6705 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6706 imm_reg_type))
6707 && (error_p ()
6708 || !(res2 = parse_constant_immediate (&str, &val,
6709 imm_reg_type))))
6710 goto failure;
6711 if ((res1 && qfloat == 0) || (res2 && val == 0))
6712 {
6713 info->imm.value = 0;
6714 info->imm.is_fp = 1;
6715 break;
6716 }
6717 set_fatal_syntax_error (_("immediate zero expected"));
6718 goto failure;
6719 }
6720
6721 case AARCH64_OPND_IMM_MOV:
6722 {
6723 char *saved = str;
6724 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6725 reg_name_p (str, REG_TYPE_VN))
6726 goto failure;
6727 str = saved;
6728 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6729 GE_OPT_PREFIX, REJECT_ABSENT));
6730 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6731 later. fix_mov_imm_insn will try to determine a machine
6732 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6733 message if the immediate cannot be moved by a single
6734 instruction. */
6735 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6736 inst.base.operands[i].skip = 1;
6737 }
6738 break;
6739
6740 case AARCH64_OPND_SIMD_IMM:
6741 case AARCH64_OPND_SIMD_IMM_SFT:
6742 if (! parse_big_immediate (&str, &val, imm_reg_type))
6743 goto failure;
6744 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6745 /* addr_off_p */ 0,
6746 /* need_libopcodes_p */ 1,
6747 /* skip_p */ 1);
6748 /* Parse shift.
6749 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6750 shift, we don't check it here; we leave the checking to
6751 the libopcodes (operand_general_constraint_met_p). By
6752 doing this, we achieve better diagnostics. */
6753 if (skip_past_comma (&str)
6754 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6755 goto failure;
6756 if (!info->shifter.operator_present
6757 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6758 {
6759 /* Default to LSL if not present. Libopcodes prefers shifter
6760 kind to be explicit. */
6761 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6762 info->shifter.kind = AARCH64_MOD_LSL;
6763 }
6764 break;
6765
6766 case AARCH64_OPND_FPIMM:
6767 case AARCH64_OPND_SIMD_FPIMM:
6768 case AARCH64_OPND_SVE_FPIMM8:
6769 {
6770 int qfloat;
6771 bool dp_p;
6772
6773 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6774 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6775 || !aarch64_imm_float_p (qfloat))
6776 {
6777 if (!error_p ())
6778 set_fatal_syntax_error (_("invalid floating-point"
6779 " constant"));
6780 goto failure;
6781 }
6782 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6783 inst.base.operands[i].imm.is_fp = 1;
6784 }
6785 break;
6786
6787 case AARCH64_OPND_SVE_I1_HALF_ONE:
6788 case AARCH64_OPND_SVE_I1_HALF_TWO:
6789 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6790 {
6791 int qfloat;
6792 bool dp_p;
6793
6794 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6795 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6796 {
6797 if (!error_p ())
6798 set_fatal_syntax_error (_("invalid floating-point"
6799 " constant"));
6800 goto failure;
6801 }
6802 inst.base.operands[i].imm.value = qfloat;
6803 inst.base.operands[i].imm.is_fp = 1;
6804 }
6805 break;
6806
6807 case AARCH64_OPND_LIMM:
6808 po_misc_or_fail (parse_shifter_operand (&str, info,
6809 SHIFTED_LOGIC_IMM));
6810 if (info->shifter.operator_present)
6811 {
6812 set_fatal_syntax_error
6813 (_("shift not allowed for bitmask immediate"));
6814 goto failure;
6815 }
6816 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6817 /* addr_off_p */ 0,
6818 /* need_libopcodes_p */ 1,
6819 /* skip_p */ 1);
6820 break;
6821
6822 case AARCH64_OPND_AIMM:
6823 if (opcode->op == OP_ADD)
6824 /* ADD may have relocation types. */
6825 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6826 SHIFTED_ARITH_IMM));
6827 else
6828 po_misc_or_fail (parse_shifter_operand (&str, info,
6829 SHIFTED_ARITH_IMM));
6830 switch (inst.reloc.type)
6831 {
6832 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6833 info->shifter.amount = 12;
6834 break;
6835 case BFD_RELOC_UNUSED:
6836 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6837 if (info->shifter.kind != AARCH64_MOD_NONE)
6838 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6839 inst.reloc.pc_rel = 0;
6840 break;
6841 default:
6842 break;
6843 }
6844 info->imm.value = 0;
6845 if (!info->shifter.operator_present)
6846 {
6847 /* Default to LSL if not present. Libopcodes prefers shifter
6848 kind to be explicit. */
6849 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6850 info->shifter.kind = AARCH64_MOD_LSL;
6851 }
6852 break;
6853
6854 case AARCH64_OPND_HALF:
6855 {
6856 /* #<imm16> or relocation. */
6857 int internal_fixup_p;
6858 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6859 if (internal_fixup_p)
6860 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6861 skip_whitespace (str);
6862 if (skip_past_comma (&str))
6863 {
6864 /* {, LSL #<shift>} */
6865 if (! aarch64_gas_internal_fixup_p ())
6866 {
6867 set_fatal_syntax_error (_("can't mix relocation modifier "
6868 "with explicit shift"));
6869 goto failure;
6870 }
6871 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6872 }
6873 else
6874 inst.base.operands[i].shifter.amount = 0;
6875 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6876 inst.base.operands[i].imm.value = 0;
6877 if (! process_movw_reloc_info ())
6878 goto failure;
6879 }
6880 break;
6881
6882 case AARCH64_OPND_EXCEPTION:
6883 case AARCH64_OPND_UNDEFINED:
6884 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6885 imm_reg_type));
6886 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6887 /* addr_off_p */ 0,
6888 /* need_libopcodes_p */ 0,
6889 /* skip_p */ 1);
6890 break;
6891
6892 case AARCH64_OPND_NZCV:
6893 {
6894 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6895 if (nzcv != NULL)
6896 {
6897 str += 4;
6898 info->imm.value = nzcv->value;
6899 break;
6900 }
6901 po_imm_or_fail (0, 15);
6902 info->imm.value = val;
6903 }
6904 break;
6905
6906 case AARCH64_OPND_COND:
6907 case AARCH64_OPND_COND1:
6908 {
6909 char *start = str;
6910 do
6911 str++;
6912 while (ISALPHA (*str));
6913 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6914 if (info->cond == NULL)
6915 {
6916 set_syntax_error (_("invalid condition"));
6917 goto failure;
6918 }
6919 else if (operands[i] == AARCH64_OPND_COND1
6920 && (info->cond->value & 0xe) == 0xe)
6921 {
6922 /* Do not allow AL or NV. */
6923 set_default_error ();
6924 goto failure;
6925 }
6926 }
6927 break;
6928
6929 case AARCH64_OPND_ADDR_ADRP:
6930 po_misc_or_fail (parse_adrp (&str));
6931 /* Clear the value as operand needs to be relocated. */
6932 info->imm.value = 0;
6933 break;
6934
6935 case AARCH64_OPND_ADDR_PCREL14:
6936 case AARCH64_OPND_ADDR_PCREL19:
6937 case AARCH64_OPND_ADDR_PCREL21:
6938 case AARCH64_OPND_ADDR_PCREL26:
6939 po_misc_or_fail (parse_address (&str, info));
6940 if (!info->addr.pcrel)
6941 {
6942 set_syntax_error (_("invalid pc-relative address"));
6943 goto failure;
6944 }
6945 if (inst.gen_lit_pool
6946 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6947 {
6948 /* Only permit "=value" in the literal load instructions.
6949 The literal will be generated by programmer_friendly_fixup. */
6950 set_syntax_error (_("invalid use of \"=immediate\""));
6951 goto failure;
6952 }
6953 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6954 {
6955 set_syntax_error (_("unrecognized relocation suffix"));
6956 goto failure;
6957 }
6958 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6959 {
6960 info->imm.value = inst.reloc.exp.X_add_number;
6961 inst.reloc.type = BFD_RELOC_UNUSED;
6962 }
6963 else
6964 {
6965 info->imm.value = 0;
6966 if (inst.reloc.type == BFD_RELOC_UNUSED)
6967 switch (opcode->iclass)
6968 {
6969 case compbranch:
6970 case condbranch:
6971 /* e.g. CBZ or B.COND */
6972 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6973 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6974 break;
6975 case testbranch:
6976 /* e.g. TBZ */
6977 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6978 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6979 break;
6980 case branch_imm:
6981 /* e.g. B or BL */
6982 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6983 inst.reloc.type =
6984 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6985 : BFD_RELOC_AARCH64_JUMP26;
6986 break;
6987 case loadlit:
6988 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6989 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6990 break;
6991 case pcreladdr:
6992 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6993 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6994 break;
6995 default:
6996 gas_assert (0);
6997 abort ();
6998 }
6999 inst.reloc.pc_rel = 1;
7000 }
7001 break;
7002
7003 case AARCH64_OPND_ADDR_SIMPLE:
7004 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7005 {
7006 /* [<Xn|SP>{, #<simm>}] */
7007 char *start = str;
7008 /* First use the normal address-parsing routines, to get
7009 the usual syntax errors. */
7010 po_misc_or_fail (parse_address (&str, info));
7011 if (info->addr.pcrel || info->addr.offset.is_reg
7012 || !info->addr.preind || info->addr.postind
7013 || info->addr.writeback)
7014 {
7015 set_syntax_error (_("invalid addressing mode"));
7016 goto failure;
7017 }
7018
7019 /* Then retry, matching the specific syntax of these addresses. */
7020 str = start;
7021 po_char_or_fail ('[');
7022 po_reg_or_fail (REG_TYPE_R64_SP);
7023 /* Accept optional ", #0". */
7024 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7025 && skip_past_char (&str, ','))
7026 {
7027 skip_past_char (&str, '#');
7028 if (! skip_past_char (&str, '0'))
7029 {
7030 set_fatal_syntax_error
7031 (_("the optional immediate offset can only be 0"));
7032 goto failure;
7033 }
7034 }
7035 po_char_or_fail (']');
7036 break;
7037 }
7038
7039 case AARCH64_OPND_ADDR_REGOFF:
7040 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7041 po_misc_or_fail (parse_address (&str, info));
7042 regoff_addr:
7043 if (info->addr.pcrel || !info->addr.offset.is_reg
7044 || !info->addr.preind || info->addr.postind
7045 || info->addr.writeback)
7046 {
7047 set_syntax_error (_("invalid addressing mode"));
7048 goto failure;
7049 }
7050 if (!info->shifter.operator_present)
7051 {
7052 /* Default to LSL if not present. Libopcodes prefers shifter
7053 kind to be explicit. */
7054 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7055 info->shifter.kind = AARCH64_MOD_LSL;
7056 }
7057 /* Qualifier to be deduced by libopcodes. */
7058 break;
7059
7060 case AARCH64_OPND_ADDR_SIMM7:
7061 po_misc_or_fail (parse_address (&str, info));
7062 if (info->addr.pcrel || info->addr.offset.is_reg
7063 || (!info->addr.preind && !info->addr.postind))
7064 {
7065 set_syntax_error (_("invalid addressing mode"));
7066 goto failure;
7067 }
7068 if (inst.reloc.type != BFD_RELOC_UNUSED)
7069 {
7070 set_syntax_error (_("relocation not allowed"));
7071 goto failure;
7072 }
7073 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7074 /* addr_off_p */ 1,
7075 /* need_libopcodes_p */ 1,
7076 /* skip_p */ 0);
7077 break;
7078
7079 case AARCH64_OPND_ADDR_SIMM9:
7080 case AARCH64_OPND_ADDR_SIMM9_2:
7081 case AARCH64_OPND_ADDR_SIMM11:
7082 case AARCH64_OPND_ADDR_SIMM13:
7083 po_misc_or_fail (parse_address (&str, info));
7084 if (info->addr.pcrel || info->addr.offset.is_reg
7085 || (!info->addr.preind && !info->addr.postind)
7086 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7087 && info->addr.writeback))
7088 {
7089 set_syntax_error (_("invalid addressing mode"));
7090 goto failure;
7091 }
7092 if (inst.reloc.type != BFD_RELOC_UNUSED)
7093 {
7094 set_syntax_error (_("relocation not allowed"));
7095 goto failure;
7096 }
7097 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7098 /* addr_off_p */ 1,
7099 /* need_libopcodes_p */ 1,
7100 /* skip_p */ 0);
7101 break;
7102
7103 case AARCH64_OPND_ADDR_SIMM10:
7104 case AARCH64_OPND_ADDR_OFFSET:
7105 po_misc_or_fail (parse_address (&str, info));
7106 if (info->addr.pcrel || info->addr.offset.is_reg
7107 || !info->addr.preind || info->addr.postind)
7108 {
7109 set_syntax_error (_("invalid addressing mode"));
7110 goto failure;
7111 }
7112 if (inst.reloc.type != BFD_RELOC_UNUSED)
7113 {
7114 set_syntax_error (_("relocation not allowed"));
7115 goto failure;
7116 }
7117 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7118 /* addr_off_p */ 1,
7119 /* need_libopcodes_p */ 1,
7120 /* skip_p */ 0);
7121 break;
7122
7123 case AARCH64_OPND_ADDR_UIMM12:
7124 po_misc_or_fail (parse_address (&str, info));
7125 if (info->addr.pcrel || info->addr.offset.is_reg
7126 || !info->addr.preind || info->addr.writeback)
7127 {
7128 set_syntax_error (_("invalid addressing mode"));
7129 goto failure;
7130 }
7131 if (inst.reloc.type == BFD_RELOC_UNUSED)
7132 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7133 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7134 || (inst.reloc.type
7135 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7136 || (inst.reloc.type
7137 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7138 || (inst.reloc.type
7139 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7140 || (inst.reloc.type
7141 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7142 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7143 /* Leave qualifier to be determined by libopcodes. */
7144 break;
7145
7146 case AARCH64_OPND_SIMD_ADDR_POST:
7147 /* [<Xn|SP>], <Xm|#<amount>> */
7148 po_misc_or_fail (parse_address (&str, info));
7149 if (!info->addr.postind || !info->addr.writeback)
7150 {
7151 set_syntax_error (_("invalid addressing mode"));
7152 goto failure;
7153 }
7154 if (!info->addr.offset.is_reg)
7155 {
7156 if (inst.reloc.exp.X_op == O_constant)
7157 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7158 else
7159 {
7160 set_fatal_syntax_error
7161 (_("writeback value must be an immediate constant"));
7162 goto failure;
7163 }
7164 }
7165 /* No qualifier. */
7166 break;
7167
7168 case AARCH64_OPND_SME_SM_ZA:
7169 /* { SM | ZA } */
7170 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7171 {
7172 set_syntax_error (_("unknown or missing PSTATE field name"));
7173 goto failure;
7174 }
7175 info->reg.regno = val;
7176 break;
7177
7178 case AARCH64_OPND_SME_PnT_Wm_imm:
7179 /* <Pn>.<T>[<Wm>, #<imm>] */
7180 {
7181 int index_base_reg;
7182 int imm;
7183 val = parse_sme_pred_reg_with_index (&str,
7184 &index_base_reg,
7185 &imm,
7186 &qualifier);
7187 if (val == PARSE_FAIL)
7188 goto failure;
7189
7190 info->za_tile_vector.regno = val;
7191 info->za_tile_vector.index.regno = index_base_reg;
7192 info->za_tile_vector.index.imm = imm;
7193 info->qualifier = qualifier;
7194 break;
7195 }
7196
7197 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7198 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7199 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7200 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7201 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7202 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7203 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7204 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7205 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7206 case AARCH64_OPND_SVE_ADDR_RI_U6:
7207 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7208 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7209 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7210 /* [X<n>{, #imm, MUL VL}]
7211 [X<n>{, #imm}]
7212 but recognizing SVE registers. */
7213 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7214 &offset_qualifier));
7215 if (base_qualifier != AARCH64_OPND_QLF_X)
7216 {
7217 set_syntax_error (_("invalid addressing mode"));
7218 goto failure;
7219 }
7220 sve_regimm:
7221 if (info->addr.pcrel || info->addr.offset.is_reg
7222 || !info->addr.preind || info->addr.writeback)
7223 {
7224 set_syntax_error (_("invalid addressing mode"));
7225 goto failure;
7226 }
7227 if (inst.reloc.type != BFD_RELOC_UNUSED
7228 || inst.reloc.exp.X_op != O_constant)
7229 {
7230 /* Make sure this has priority over
7231 "invalid addressing mode". */
7232 set_fatal_syntax_error (_("constant offset required"));
7233 goto failure;
7234 }
7235 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7236 break;
7237
7238 case AARCH64_OPND_SVE_ADDR_R:
7239 /* [<Xn|SP>{, <R><m>}]
7240 but recognizing SVE registers. */
7241 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7242 &offset_qualifier));
7243 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7244 {
7245 offset_qualifier = AARCH64_OPND_QLF_X;
7246 info->addr.offset.is_reg = 1;
7247 info->addr.offset.regno = 31;
7248 }
7249 else if (base_qualifier != AARCH64_OPND_QLF_X
7250 || offset_qualifier != AARCH64_OPND_QLF_X)
7251 {
7252 set_syntax_error (_("invalid addressing mode"));
7253 goto failure;
7254 }
7255 goto regoff_addr;
7256
7257 case AARCH64_OPND_SVE_ADDR_RR:
7258 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7259 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7260 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7261 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7262 case AARCH64_OPND_SVE_ADDR_RX:
7263 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7264 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7265 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7266 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7267 but recognizing SVE registers. */
7268 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7269 &offset_qualifier));
7270 if (base_qualifier != AARCH64_OPND_QLF_X
7271 || offset_qualifier != AARCH64_OPND_QLF_X)
7272 {
7273 set_syntax_error (_("invalid addressing mode"));
7274 goto failure;
7275 }
7276 goto regoff_addr;
7277
7278 case AARCH64_OPND_SVE_ADDR_RZ:
7279 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7280 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7281 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7282 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7283 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7284 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7285 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7286 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7287 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7288 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7289 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7290 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7291 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7292 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7293 &offset_qualifier));
7294 if (base_qualifier != AARCH64_OPND_QLF_X
7295 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7296 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7297 {
7298 set_syntax_error (_("invalid addressing mode"));
7299 goto failure;
7300 }
7301 info->qualifier = offset_qualifier;
7302 goto regoff_addr;
7303
7304 case AARCH64_OPND_SVE_ADDR_ZX:
7305 /* [Zn.<T>{, <Xm>}]. */
7306 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7307 &offset_qualifier));
7308 /* Things to check:
7309 base_qualifier either S_S or S_D
7310 offset_qualifier must be X
7311 */
7312 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7313 && base_qualifier != AARCH64_OPND_QLF_S_D)
7314 || offset_qualifier != AARCH64_OPND_QLF_X)
7315 {
7316 set_syntax_error (_("invalid addressing mode"));
7317 goto failure;
7318 }
7319 info->qualifier = base_qualifier;
7320 if (!info->addr.offset.is_reg || info->addr.pcrel
7321 || !info->addr.preind || info->addr.writeback
7322 || info->shifter.operator_present != 0)
7323 {
7324 set_syntax_error (_("invalid addressing mode"));
7325 goto failure;
7326 }
7327 info->shifter.kind = AARCH64_MOD_LSL;
7328 break;
7329
7330
7331 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7332 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7333 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7334 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7335 /* [Z<n>.<T>{, #imm}] */
7336 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7337 &offset_qualifier));
7338 if (base_qualifier != AARCH64_OPND_QLF_S_S
7339 && base_qualifier != AARCH64_OPND_QLF_S_D)
7340 {
7341 set_syntax_error (_("invalid addressing mode"));
7342 goto failure;
7343 }
7344 info->qualifier = base_qualifier;
7345 goto sve_regimm;
7346
7347 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7348 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7349 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7350 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7351 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7352
7353 We don't reject:
7354
7355 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7356
7357 here since we get better error messages by leaving it to
7358 the qualifier checking routines. */
7359 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7360 &offset_qualifier));
7361 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7362 && base_qualifier != AARCH64_OPND_QLF_S_D)
7363 || offset_qualifier != base_qualifier)
7364 {
7365 set_syntax_error (_("invalid addressing mode"));
7366 goto failure;
7367 }
7368 info->qualifier = base_qualifier;
7369 goto regoff_addr;
7370
7371 case AARCH64_OPND_SYSREG:
7372 {
7373 uint32_t sysreg_flags;
7374 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7375 &sysreg_flags)) == PARSE_FAIL)
7376 {
7377 set_syntax_error (_("unknown or missing system register name"));
7378 goto failure;
7379 }
7380 inst.base.operands[i].sysreg.value = val;
7381 inst.base.operands[i].sysreg.flags = sysreg_flags;
7382 break;
7383 }
7384
7385 case AARCH64_OPND_PSTATEFIELD:
7386 {
7387 uint32_t sysreg_flags;
7388 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7389 &sysreg_flags)) == PARSE_FAIL)
7390 {
7391 set_syntax_error (_("unknown or missing PSTATE field name"));
7392 goto failure;
7393 }
7394 inst.base.operands[i].pstatefield = val;
7395 inst.base.operands[i].sysreg.flags = sysreg_flags;
7396 break;
7397 }
7398
7399 case AARCH64_OPND_SYSREG_IC:
7400 inst.base.operands[i].sysins_op =
7401 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7402 goto sys_reg_ins;
7403
7404 case AARCH64_OPND_SYSREG_DC:
7405 inst.base.operands[i].sysins_op =
7406 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7407 goto sys_reg_ins;
7408
7409 case AARCH64_OPND_SYSREG_AT:
7410 inst.base.operands[i].sysins_op =
7411 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7412 goto sys_reg_ins;
7413
7414 case AARCH64_OPND_SYSREG_SR:
7415 inst.base.operands[i].sysins_op =
7416 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7417 goto sys_reg_ins;
7418
7419 case AARCH64_OPND_SYSREG_TLBI:
7420 inst.base.operands[i].sysins_op =
7421 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7422 sys_reg_ins:
7423 if (inst.base.operands[i].sysins_op == NULL)
7424 {
7425 set_fatal_syntax_error ( _("unknown or missing operation name"));
7426 goto failure;
7427 }
7428 break;
7429
7430 case AARCH64_OPND_BARRIER:
7431 case AARCH64_OPND_BARRIER_ISB:
7432 val = parse_barrier (&str);
7433 if (val != PARSE_FAIL
7434 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7435 {
7436 /* ISB only accepts options name 'sy'. */
7437 set_syntax_error
7438 (_("the specified option is not accepted in ISB"));
7439 /* Turn off backtrack as this optional operand is present. */
7440 backtrack_pos = 0;
7441 goto failure;
7442 }
7443 if (val != PARSE_FAIL
7444 && operands[i] == AARCH64_OPND_BARRIER)
7445 {
7446 /* Regular barriers accept options CRm (C0-C15).
7447 DSB nXS barrier variant accepts values > 15. */
7448 if (val < 0 || val > 15)
7449 {
7450 set_syntax_error (_("the specified option is not accepted in DSB"));
7451 goto failure;
7452 }
7453 }
7454 /* This is an extension to accept a 0..15 immediate. */
7455 if (val == PARSE_FAIL)
7456 po_imm_or_fail (0, 15);
7457 info->barrier = aarch64_barrier_options + val;
7458 break;
7459
7460 case AARCH64_OPND_BARRIER_DSB_NXS:
7461 val = parse_barrier (&str);
7462 if (val != PARSE_FAIL)
7463 {
7464 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7465 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7466 {
7467 set_syntax_error (_("the specified option is not accepted in DSB"));
7468 /* Turn off backtrack as this optional operand is present. */
7469 backtrack_pos = 0;
7470 goto failure;
7471 }
7472 }
7473 else
7474 {
7475 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7476 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7477 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7478 goto failure;
7479 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7480 {
7481 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7482 goto failure;
7483 }
7484 }
7485 /* Option index is encoded as 2-bit value in val<3:2>. */
7486 val = (val >> 2) - 4;
7487 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7488 break;
7489
7490 case AARCH64_OPND_PRFOP:
7491 val = parse_pldop (&str);
7492 /* This is an extension to accept a 0..31 immediate. */
7493 if (val == PARSE_FAIL)
7494 po_imm_or_fail (0, 31);
7495 inst.base.operands[i].prfop = aarch64_prfops + val;
7496 break;
7497
7498 case AARCH64_OPND_BARRIER_PSB:
7499 val = parse_barrier_psb (&str, &(info->hint_option));
7500 if (val == PARSE_FAIL)
7501 goto failure;
7502 break;
7503
7504 case AARCH64_OPND_BTI_TARGET:
7505 val = parse_bti_operand (&str, &(info->hint_option));
7506 if (val == PARSE_FAIL)
7507 goto failure;
7508 break;
7509
7510 case AARCH64_OPND_SME_ZAda_2b:
7511 case AARCH64_OPND_SME_ZAda_3b:
7512 val = parse_sme_zada_operand (&str, &qualifier);
7513 if (val == PARSE_FAIL)
7514 goto failure;
7515 info->reg.regno = val;
7516 info->qualifier = qualifier;
7517 break;
7518
7519 case AARCH64_OPND_SME_ZA_HV_idx_src:
7520 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7521 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7522 {
7523 enum sme_hv_slice slice_indicator;
7524 int vector_select_register;
7525 int imm;
7526
7527 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7528 val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7529 &slice_indicator,
7530 &vector_select_register,
7531 &imm,
7532 &qualifier);
7533 else
7534 val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7535 &vector_select_register,
7536 &imm,
7537 &qualifier);
7538 if (val == PARSE_FAIL)
7539 goto failure;
7540 info->za_tile_vector.regno = val;
7541 info->za_tile_vector.index.regno = vector_select_register;
7542 info->za_tile_vector.index.imm = imm;
7543 info->za_tile_vector.v = slice_indicator;
7544 info->qualifier = qualifier;
7545 break;
7546 }
7547
7548 case AARCH64_OPND_SME_list_of_64bit_tiles:
7549 val = parse_sme_list_of_64bit_tiles (&str);
7550 if (val == PARSE_FAIL)
7551 goto failure;
7552 info->imm.value = val;
7553 break;
7554
7555 case AARCH64_OPND_SME_ZA_array:
7556 {
7557 int imm;
7558 val = parse_sme_za_array (&str, &imm);
7559 if (val == PARSE_FAIL)
7560 goto failure;
7561 info->za_tile_vector.index.regno = val;
7562 info->za_tile_vector.index.imm = imm;
7563 break;
7564 }
7565
7566 case AARCH64_OPND_MOPS_ADDR_Rd:
7567 case AARCH64_OPND_MOPS_ADDR_Rs:
7568 po_char_or_fail ('[');
7569 if (!parse_x0_to_x30 (&str, info))
7570 goto failure;
7571 po_char_or_fail (']');
7572 po_char_or_fail ('!');
7573 break;
7574
7575 case AARCH64_OPND_MOPS_WB_Rn:
7576 if (!parse_x0_to_x30 (&str, info))
7577 goto failure;
7578 po_char_or_fail ('!');
7579 break;
7580
7581 default:
7582 as_fatal (_("unhandled operand code %d"), operands[i]);
7583 }
7584
7585 /* If we get here, this operand was successfully parsed. */
7586 inst.base.operands[i].present = 1;
7587 continue;
7588
7589 failure:
7590 /* The parse routine should already have set the error, but in case
7591 not, set a default one here. */
7592 if (! error_p ())
7593 set_default_error ();
7594
7595 if (! backtrack_pos)
7596 goto parse_operands_return;
7597
7598 {
7599 /* We reach here because this operand is marked as optional, and
7600 either no operand was supplied or the operand was supplied but it
7601 was syntactically incorrect. In the latter case we report an
7602 error. In the former case we perform a few more checks before
7603 dropping through to the code to insert the default operand. */
7604
7605 char *tmp = backtrack_pos;
7606 char endchar = END_OF_INSN;
7607
7608 if (i != (aarch64_num_of_operands (opcode) - 1))
7609 endchar = ',';
7610 skip_past_char (&tmp, ',');
7611
7612 if (*tmp != endchar)
7613 /* The user has supplied an operand in the wrong format. */
7614 goto parse_operands_return;
7615
7616 /* Make sure there is not a comma before the optional operand.
7617 For example the fifth operand of 'sys' is optional:
7618
7619 sys #0,c0,c0,#0, <--- wrong
7620 sys #0,c0,c0,#0 <--- correct. */
7621 if (comma_skipped_p && i && endchar == END_OF_INSN)
7622 {
7623 set_fatal_syntax_error
7624 (_("unexpected comma before the omitted optional operand"));
7625 goto parse_operands_return;
7626 }
7627 }
7628
7629 /* Reaching here means we are dealing with an optional operand that is
7630 omitted from the assembly line. */
7631 gas_assert (optional_operand_p (opcode, i));
7632 info->present = 0;
7633 process_omitted_operand (operands[i], opcode, i, info);
7634
7635 /* Try again, skipping the optional operand at backtrack_pos. */
7636 str = backtrack_pos;
7637 backtrack_pos = 0;
7638
7639 /* Clear any error record after the omitted optional operand has been
7640 successfully handled. */
7641 clear_error ();
7642 }
7643
7644 /* Check if we have parsed all the operands. */
7645 if (*str != '\0' && ! error_p ())
7646 {
7647 /* Set I to the index of the last present operand; this is
7648 for the purpose of diagnostics. */
7649 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7650 ;
7651 set_fatal_syntax_error
7652 (_("unexpected characters following instruction"));
7653 }
7654
7655 parse_operands_return:
7656
7657 if (error_p ())
7658 {
7659 DEBUG_TRACE ("parsing FAIL: %s - %s",
7660 operand_mismatch_kind_names[get_error_kind ()],
7661 get_error_message ());
7662 /* Record the operand error properly; this is useful when there
7663 are multiple instruction templates for a mnemonic name, so that
7664 later on, we can select the error that most closely describes
7665 the problem. */
7666 record_operand_error (opcode, i, get_error_kind (),
7667 get_error_message ());
7668 return false;
7669 }
7670 else
7671 {
7672 DEBUG_TRACE ("parsing SUCCESS");
7673 return true;
7674 }
7675 }
7676
7677 /* It does some fix-up to provide some programmer friendly feature while
7678 keeping the libopcodes happy, i.e. libopcodes only accepts
7679 the preferred architectural syntax.
7680 Return FALSE if there is any failure; otherwise return TRUE. */
7681
static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  /* Dispatch on the instruction class; only a few classes need any
     programmer-friendly massaging before encoding.  */
  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register can only name bits 0-31; reject anything wider.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  /* Canonicalize to the X form that libopcodes expects.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW always loads a 32-bit literal regardless of the
	     destination qualifier.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
7784
7785 /* Check for loads and stores that will cause unpredictable behavior. */
7786
static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 NOTE(review): bit 22 of the encoding appears to be the load/store
	 selector for these classes -- confirm against the opcode tables.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.
		 Bit 21 here presumably selects the pair form -- verify.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7880
7881 static void
7882 force_automatic_sequence_close (void)
7883 {
7884 struct aarch64_segment_info_type *tc_seg_info;
7885
7886 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7887 if (tc_seg_info->insn_sequence.instr)
7888 {
7889 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7890 _("previous `%s' sequence has not been closed"),
7891 tc_seg_info->insn_sequence.instr->opcode->name);
7892 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7893 }
7894 }
7895
7896 /* A wrapper function to interface with libopcodes on encoding and
7897 record the error message if there is any.
7898
7899 Return TRUE on success; otherwise return FALSE. */
7900
7901 static bool
7902 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7903 aarch64_insn *code)
7904 {
7905 aarch64_operand_error error_info;
7906 memset (&error_info, '\0', sizeof (error_info));
7907 error_info.kind = AARCH64_OPDE_NIL;
7908 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7909 && !error_info.non_fatal)
7910 return true;
7911
7912 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7913 record_operand_error_info (opcode, &error_info);
7914 return error_info.non_fatal;
7915 }
7916
#ifdef DEBUG_AARCH64
/* Debug helper: print the operand list of OPCODE, preferring the operand's
   short name and falling back to its description.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  for (int i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *text = aarch64_get_operand_name (opcode->operands[i]);
      if (text[0] == '\0')
	text = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, text);
    }
}
#endif /* DEBUG_AARCH64 */
7932
7933 /* This is the guts of the machine-dependent assembler. STR points to a
7934 machine dependent instruction. This function is supposed to emit
7935 the frags/bytes it assembles to. */
7936
void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  Remember the first '.' -- it separates a
     condition suffix (e.g. b.eq) from the base mnemonic.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* A line with no '.' may be a ".req"-style register alias definition.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* The parsed condition must survive the per-template instruction reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, apply convenience fixups, then hand to libopcodes.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; reset state before trying the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8088
8089 /* Various frobbings of labels and their addresses. */
8090
8091 void
8092 aarch64_start_line_hook (void)
8093 {
8094 last_label_seen = NULL;
8095 }
8096
8097 void
8098 aarch64_frob_label (symbolS * sym)
8099 {
8100 last_label_seen = sym;
8101
8102 dwarf2_emit_label (sym);
8103 }
8104
8105 void
8106 aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
8107 {
8108 /* Check to see if we have a block to close. */
8109 force_automatic_sequence_close ();
8110 }
8111
8112 int
8113 aarch64_data_in_code (void)
8114 {
8115 if (startswith (input_line_pointer + 1, "data:"))
8116 {
8117 *input_line_pointer = '/';
8118 input_line_pointer += 5;
8119 *input_line_pointer = 0;
8120 return 1;
8121 }
8122
8123 return 0;
8124 }
8125
/* Canonicalize NAME by stripping a trailing "/data" marker (as attached
   by aarch64_data_in_code), truncating NAME in place.  Returns NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  /* Use size_t for the strlen result rather than int, which could
     narrow on very long names.  */
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8136 \f
8137 /* Table of all register names defined by default. The user can
8138 define additional names with .req. Note that all register names
8139 should appear in both upper and lowercase variants. Some registers
8140 also have mixed-case names. */
8141
8142 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8143 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8144 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8145 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8146 #define REGSET16(p,t) \
8147 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8148 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8149 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8150 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8151 #define REGSET16S(p,s,t) \
8152 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8153 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8154 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8155 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8156 #define REGSET31(p,t) \
8157 REGSET16(p, t), \
8158 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8159 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8160 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8161 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8162 #define REGSET(p,t) \
8163 REGSET31(p,t), REGNUM(p,31,t)
8164
8165 /* These go into aarch64_reg_hsh hash-table. */
8166 static const reg_entry reg_names[] = {
8167 /* Integer registers. */
8168 REGSET31 (x, R_64), REGSET31 (X, R_64),
8169 REGSET31 (w, R_32), REGSET31 (W, R_32),
8170
8171 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
8172 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
8173 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
8174 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
8175 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
8176 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
8177
8178 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
8179 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
8180
8181 /* Floating-point single precision registers. */
8182 REGSET (s, FP_S), REGSET (S, FP_S),
8183
8184 /* Floating-point double precision registers. */
8185 REGSET (d, FP_D), REGSET (D, FP_D),
8186
8187 /* Floating-point half precision registers. */
8188 REGSET (h, FP_H), REGSET (H, FP_H),
8189
8190 /* Floating-point byte precision registers. */
8191 REGSET (b, FP_B), REGSET (B, FP_B),
8192
8193 /* Floating-point quad precision registers. */
8194 REGSET (q, FP_Q), REGSET (Q, FP_Q),
8195
8196 /* FP/SIMD registers. */
8197 REGSET (v, VN), REGSET (V, VN),
8198
8199 /* SVE vector registers. */
8200 REGSET (z, ZN), REGSET (Z, ZN),
8201
8202 /* SVE predicate registers. */
8203 REGSET16 (p, PN), REGSET16 (P, PN),
8204
8205 /* SME ZA tile registers. */
8206 REGSET16 (za, ZA), REGSET16 (ZA, ZA),
8207
8208 /* SME ZA tile registers (horizontal slice). */
8209 REGSET16S (za, h, ZAH), REGSET16S (ZA, H, ZAH),
8210
8211 /* SME ZA tile registers (vertical slice). */
8212 REGSET16S (za, v, ZAV), REGSET16S (ZA, V, ZAV)
8213 };
8214
8215 #undef REGDEF
8216 #undef REGDEF_ALIAS
8217 #undef REGNUM
8218 #undef REGSET16
8219 #undef REGSET31
8220 #undef REGSET
8221
/* Single-bit values for the N, Z, C and V condition flags: an upper-case
   letter means the flag is set, lower-case means clear.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the four flag bits into the 4-bit NZCV immediate, N being the
   most significant bit.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All 16 spellings of the NZCV flag-mask operand, one per bit pattern.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8259 \f
8260 /* MD interface: bits in the object file. */
8261
8262 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8263 for use in the a.out file, and stores them in the array pointed to by buf.
8264 This knows about the endian-ness of the target machine and does
8265 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8266 2 (short) and 4 (long) Floating numbers are put out as a series of
8267 LITTLENUMS (shorts, here at least). */
8268
8269 void
8270 md_number_to_chars (char *buf, valueT val, int n)
8271 {
8272 if (target_big_endian)
8273 number_to_chars_bigendian (buf, val, n);
8274 else
8275 number_to_chars_littleendian (buf, val, n);
8276 }
8277
8278 /* MD interface: Sections. */
8279
8280 /* Estimate the size of a frag before relaxing. Assume everything fits in
8281 4 bytes. */
8282
8283 int
8284 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
8285 {
8286 fragp->fr_var = 4;
8287 return 4;
8288 }
8289
8290 /* Round up a section size to the appropriate boundary. */
8291
8292 valueT
8293 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8294 {
8295 return size;
8296 }
8297
8298 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
8299 of an rs_align_code fragment.
8300
8301 Here we fill the frag with the appropriate info for padding the
8302 output stream. The resulting frag will consist of a fixed (fr_fix)
8303 and of a repeating (fr_var) part.
8304
8305 The fixed content is always emitted before the repeating content and
8306 these two parts are used as follows in constructing the output:
8307 - the fixed part will be used to align to a valid instruction word
8308 boundary, in case that we start at a misaligned address; as no
8309 executable instruction can live at the misaligned location, we
8310 simply fill with zeros;
8311 - the variable part will be used to cover the remaining padding and
8312 we fill using the AArch64 NOP instruction.
8313
8314 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
8315 enough storage space for up to 3 bytes for padding the back to a valid
8316 instruction alignment and exactly 4 bytes to store the NOP pattern. */
8317
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  /* Only rs_align_code frags are filled here.  */
  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed to reach the next frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach a 4-byte instruction boundary; filled with
     zeros since no instruction can live at a misaligned address.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The variable part repeats the NOP pattern for the rest of the pad.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8355
8356 /* Perform target specific initialisation of a frag.
8357 Note - despite the name this initialisation is not done when the frag
8358 is created, but only when its type is assigned. A frag can be created
8359 and used a long time before its type is set, so beware of assuming that
8360 this initialisation is performed first. */
8361
#ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag mapping state, so there is nothing
   to initialise.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Choose a mapping state appropriate to the frag's purpose.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
8401 \f
8402 /* Initialize the DWARF-2 unwind information for this procedure. */
8403
8404 void
8405 tc_aarch64_frame_initial_instructions (void)
8406 {
8407 cfi_add_CFA_def_cfa (REG_SP, 0);
8408 }
8409 #endif /* OBJ_ELF */
8410
8411 /* Convert REGNAME to a DWARF-2 register number. */
8412
8413 int
8414 tc_aarch64_regname_to_dw2regnum (char *regname)
8415 {
8416 const reg_entry *reg = parse_reg (&regname);
8417 if (reg == NULL)
8418 return -1;
8419
8420 switch (reg->type)
8421 {
8422 case REG_TYPE_SP_32:
8423 case REG_TYPE_SP_64:
8424 case REG_TYPE_R_32:
8425 case REG_TYPE_R_64:
8426 return reg->number;
8427
8428 case REG_TYPE_FP_B:
8429 case REG_TYPE_FP_H:
8430 case REG_TYPE_FP_S:
8431 case REG_TYPE_FP_D:
8432 case REG_TYPE_FP_Q:
8433 return reg->number + 64;
8434
8435 default:
8436 break;
8437 }
8438 return -1;
8439 }
8440
8441 /* Implement DWARF2_ADDR_SIZE. */
8442
8443 int
8444 aarch64_dwarf2_addr_size (void)
8445 {
8446 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8447 if (ilp32_p)
8448 return 4;
8449 #endif
8450 return bfd_arch_bits_per_address (stdoutput) / 8;
8451 }
8452
8453 /* MD interface: Symbol and relocation handling. */
8454
8455 /* Return the address within the segment that a PC-relative fixup is
8456 relative to. For AArch64 PC-relative fixups applied to instructions
8457 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8458
8459 long
8460 md_pcrel_from_section (fixS * fixP, segT seg)
8461 {
8462 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8463
8464 /* If this is pc-relative and we are going to emit a relocation
8465 then we just want to put out any pipeline compensation that the linker
8466 will need. Otherwise we want to use the calculated base. */
8467 if (fixP->fx_pcrel
8468 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8469 || aarch64_force_relocation (fixP)))
8470 base = 0;
8471
8472 /* AArch64 should be consistent for all pc-relative relocations. */
8473 return base + AARCH64_PCREL_OFFSET;
8474 }
8475
8476 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
8477 Otherwise we have no need to default values of symbols. */
8478
8479 symbolS *
8480 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
8481 {
8482 #ifdef OBJ_ELF
8483 if (name[0] == '_' && name[1] == 'G'
8484 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
8485 {
8486 if (!GOT_symbol)
8487 {
8488 if (symbol_find (name))
8489 as_bad (_("GOT already in the symbol table"));
8490
8491 GOT_symbol = symbol_new (name, undefined_section,
8492 &zero_address_frag, 0);
8493 }
8494
8495 return GOT_symbol;
8496 }
8497 #endif
8498
8499 return 0;
8500 }
8501
8502 /* Return non-zero if the indicated VALUE has overflowed the maximum
8503 range expressible by a unsigned number with the indicated number of
8504 BITS. */
8505
8506 static bool
8507 unsigned_overflow (valueT value, unsigned bits)
8508 {
8509 valueT lim;
8510 if (bits >= sizeof (valueT) * 8)
8511 return false;
8512 lim = (valueT) 1 << bits;
8513 return (value >= lim);
8514 }
8515
8516
8517 /* Return non-zero if the indicated VALUE has overflowed the maximum
8518 range expressible by an signed number with the indicated number of
8519 BITS. */
8520
8521 static bool
8522 signed_overflow (offsetT value, unsigned bits)
8523 {
8524 offsetT lim;
8525 if (bits >= sizeof (offsetT) * 8)
8526 return false;
8527 lim = (offsetT) 1 << (bits - 1);
8528 return (value < -lim || value >= lim);
8529 }
8530
8531 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
8532 unsigned immediate offset load/store instruction, try to encode it as
8533 an unscaled, 9-bit, signed immediate offset load/store instruction.
8534 Return TRUE if it is successful; otherwise return FALSE.
8535
8536 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
8537 in response to the standard LDR/STR mnemonics when the immediate offset is
8538 unambiguous, i.e. when it is negative or unaligned. */
8539
static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled (LDUR/STUR) twin;
     OP_NIL means there is no such twin and the caller must give up.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  /* Swap the instruction over to the unscaled opcode in place.  */
  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure here leaves the
     caller to report the original diagnostic.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8593
8594 /* Called by fix_insn to fix a MOV immediate alias instruction.
8595
8596 Operand for a generic move immediate instruction, which is an alias
8597 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
8598 a 32-bit/64-bit immediate value into general register. An assembler error
8599 shall result if the immediate cannot be created by a single one of these
8600 instructions. If there is a choice, then to ensure reversability an
8601 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
8602
static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN).  NOTE(review): the comment
	 here previously said "MOVK", but the function header documents only
	 MOVZ, MOVN and ORR as candidates -- confirm against opcode tables.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* No single instruction can materialize VALUE into this register.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
8654
8655 /* An instruction operand which is immediate related may have symbol used
8656 in the assembly, e.g.
8657
8658 mov w0, u32
8659 .set u32, 0x00ffff00
8660
8661 At the time when the assembly instruction is parsed, a referenced symbol,
8662 like 'u32' in the above example may not have been seen; a fixS is created
8663 in such a case and is handled here after symbols have been resolved.
8664 Instruction is fixed up with VALUE using the information in *FIXP plus
8665 extra information in FLAGS.
8666
8667 This function is called by md_apply_fix to fix up instructions that need
8668 a fix-up described above but does not involve any linker-time relocation. */
8669
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Address of the (already emitted) instruction bytes to patch.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  /* Which operand of the instruction the fix-up value belongs to.  */
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  /* Optional decoded instruction saved at parse time; only some operand
     kinds below require it (they re-run the full encoder).  */
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate of SVC/HVC/SMC/BRK-style instructions.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      /* EXCEPTION operands have a dedicated field encoder; UNDEFINED
	 immediates are OR'ed in directly at bit 0.  */
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6 Rn Rd      ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6 Rn Rd      ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6 Rn Rd      SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6 Rn Rd      SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12       Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12       Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12       Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12       Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  A negative immediate is handled by flipping the
	     opcode and negating the value.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only legal when
	     the low 12 bits are all zero and the shifted value still fits
	     in 12 bits (i.e. the original fits in 24).  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Validity cannot be checked field-by-field,
	 so store the resolved value and re-run the full encoder.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate of MOVZ/MOVN/MOVK.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to load a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      /* A scaled load/store whose offset is not a multiple of the access
	 size may still be representable as the unscaled (LDUR/STUR)
	 variant; try that before giving up.  */
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8833
8834 /* Apply a fixup (fixP) to segment data, once it has been determined
8835 by our caller that we have all the info we need to fix it up.
8836
8837 Parameter valP is the pointer to the value of the bits. */
8838
void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* Address of the instruction/data bytes being fixed up.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* Parse-time fix-up flags (e.g. FIXUP_F_HAS_EXPLICIT_SHIFT) stashed in
     fx_addnumber.  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* LDR (literal): 19-bit word-scaled PC-relative offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* ADR: 21-bit signed byte offset.  */
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* B.cond / CBZ / CBNZ: 19-bit word-scaled offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* TBZ / TBNZ: 14-bit word-scaled offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* B / BL: 26-bit word-scaled offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* The MOVW group relocations all share the encoding path at
       movw_common below; each label block just picks the 16-bit group
       (scale = bit position of the group's low bit).  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* ABI-dependent reloc: narrow to the 32-bit or 64-bit variant now
	 that the ABI is known.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* All remaining TLS relocations: mark the symbol thread-local and
       leave the relocation for the linker.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated struct aarch64_inst.
     N.B. currently there are a very limited number of fix-up types that
     actually use this field, so the impact on performance should be
     minimal.  */
  free (fixP->tc_fix_data.inst);

  return;
}
9221
9222 /* Translate internal representation of relocation info to BFD target
9223 format. */
9224
9225 arelent *
9226 tc_gen_reloc (asection * section, fixS * fixp)
9227 {
9228 arelent *reloc;
9229 bfd_reloc_code_real_type code;
9230
9231 reloc = XNEW (arelent);
9232
9233 reloc->sym_ptr_ptr = XNEW (asymbol *);
9234 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9235 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9236
9237 if (fixp->fx_pcrel)
9238 {
9239 if (section->use_rela_p)
9240 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9241 else
9242 fixp->fx_offset = reloc->address;
9243 }
9244 reloc->addend = fixp->fx_offset;
9245
9246 code = fixp->fx_r_type;
9247 switch (code)
9248 {
9249 case BFD_RELOC_16:
9250 if (fixp->fx_pcrel)
9251 code = BFD_RELOC_16_PCREL;
9252 break;
9253
9254 case BFD_RELOC_32:
9255 if (fixp->fx_pcrel)
9256 code = BFD_RELOC_32_PCREL;
9257 break;
9258
9259 case BFD_RELOC_64:
9260 if (fixp->fx_pcrel)
9261 code = BFD_RELOC_64_PCREL;
9262 break;
9263
9264 default:
9265 break;
9266 }
9267
9268 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9269 if (reloc->howto == NULL)
9270 {
9271 as_bad_where (fixp->fx_file, fixp->fx_line,
9272 _
9273 ("cannot represent %s relocation in this object file format"),
9274 bfd_get_reloc_code_name (code));
9275 return NULL;
9276 }
9277
9278 return reloc;
9279 }
9280
9281 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9282
9283 void
9284 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9285 {
9286 bfd_reloc_code_real_type type;
9287 int pcrel = 0;
9288
9289 /* Pick a reloc.
9290 FIXME: @@ Should look at CPU word size. */
9291 switch (size)
9292 {
9293 case 1:
9294 type = BFD_RELOC_8;
9295 break;
9296 case 2:
9297 type = BFD_RELOC_16;
9298 break;
9299 case 4:
9300 type = BFD_RELOC_32;
9301 break;
9302 case 8:
9303 type = BFD_RELOC_64;
9304 break;
9305 default:
9306 as_bad (_("cannot do %u-byte relocation"), size);
9307 type = BFD_RELOC_UNUSED;
9308 break;
9309 }
9310
9311 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9312 }
9313
9314 #ifdef OBJ_ELF
9315
9316 /* Implement md_after_parse_args. This is the earliest time we need to decide
9317 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9318
9319 void
9320 aarch64_after_parse_args (void)
9321 {
9322 if (aarch64_abi != AARCH64_ABI_NONE)
9323 return;
9324
9325 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9326 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9327 aarch64_abi = AARCH64_ABI_ILP32;
9328 else
9329 aarch64_abi = AARCH64_ABI_LP64;
9330 }
9331
9332 const char *
9333 elf64_aarch64_target_format (void)
9334 {
9335 #ifdef TE_CLOUDABI
9336 /* FIXME: What to do for ilp32_p ? */
9337 if (target_big_endian)
9338 return "elf64-bigaarch64-cloudabi";
9339 else
9340 return "elf64-littleaarch64-cloudabi";
9341 #else
9342 if (target_big_endian)
9343 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9344 else
9345 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9346 #endif
9347 }
9348
/* Target hook run on each symbol during write-out; simply defers to the
   generic ELF symbol frobbing (no AArch64-specific adjustment needed).  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9354 #endif
9355
9356 /* MD interface: Finalization. */
9357
9358 /* A good place to do this, although this was probably not intended
9359 for this kind of use. We need to dump the literal pool before
9360 references are made to a null symbol pointer. */
9361
9362 void
9363 aarch64_cleanup (void)
9364 {
9365 literal_pool *pool;
9366
9367 for (pool = list_of_pools; pool; pool = pool->next)
9368 {
9369 /* Put it at the end of the relevant section. */
9370 subseg_set (pool->section, pool->sub_section);
9371 s_ltorg (0);
9372 }
9373 }
9374
9375 #ifdef OBJ_ELF
9376 /* Remove any excess mapping symbols generated for alignment frags in
9377 SEC. We may have created a mapping symbol before a zero byte
9378 alignment; remove it if there's a mapping symbol after the
9379 alignment. */
/* Callback for bfd_map_over_sections: prune redundant mapping symbols in
   SEC.  A mapping symbol at the very end of a frag is unnecessary if the
   following frag starts with one of its own, or if it sits at the end of
   the section.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain (e.g. bss-like).  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Walk forward over empty frags to decide whether SYM is shadowed
	 by a later mapping symbol at the same address.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
	         one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
	         it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9439 #endif
9440
9441 /* Adjust the symbol table. */
9442
/* Target hook: final symbol-table adjustments before write-out.  */
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9453
/* Insert KEY -> VALUE into TABLE.  The final argument 0 means an existing
   entry for KEY is kept rather than replaced, so first insertion wins;
   the return value of str_hash_insert is deliberately ignored.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9459
/* Like checked_hash_insert, but for system-register names: first assert
   that KEY fits within the fixed parsing buffer bound
   AARCH64_MAX_SYSREG_NAME_LEN.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9466
9467 static void
9468 fill_instruction_hash_table (void)
9469 {
9470 const aarch64_opcode *opcode = aarch64_opcode_table;
9471
9472 while (opcode->name != NULL)
9473 {
9474 templates *templ, *new_templ;
9475 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9476
9477 new_templ = XNEW (templates);
9478 new_templ->opcode = opcode;
9479 new_templ->next = NULL;
9480
9481 if (!templ)
9482 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9483 else
9484 {
9485 new_templ->next = templ->next;
9486 templ->next = new_templ;
9487 }
9488 ++opcode;
9489 }
9490 }
9491
9492 static inline void
9493 convert_to_upper (char *dst, const char *src, size_t num)
9494 {
9495 unsigned int i;
9496 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9497 *dst = TOUPPER (*src);
9498 *dst = '\0';
9499 }
9500
9501 /* Assume STR point to a lower-case string, allocate, convert and return
9502 the corresponding upper-case string. */
9503 static inline const char*
9504 get_upper_str (const char *str)
9505 {
9506 char *ret;
9507 size_t len = strlen (str);
9508 ret = XNEWVEC (char, len + 1);
9509 convert_to_upper (ret, str, len);
9510 return ret;
9511 }
9512
9513 /* MD interface: Initialization. */
9514
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create all the lookup tables used while parsing: mnemonics,
     condition codes, shift/extend modifiers, system registers, etc.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and pstate fields use sysreg_hash_insert, which
     additionally asserts the name-length bound used by the parser.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend modifiers, conditions, barrier options, prefetch ops and
     hint options are accepted in either case, so each name is also hashed
     in its upper-case form.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.  -mcpu takes
     precedence over -march; with neither, fall back to the build-time
     default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
9673
9674 /* Command line processing. */
9675
9676 const char *md_shortopts = "m:";
9677
9678 #ifdef AARCH64_BI_ENDIAN
9679 #define OPTION_EB (OPTION_MD_BASE + 0)
9680 #define OPTION_EL (OPTION_MD_BASE + 1)
9681 #else
9682 #if TARGET_BYTES_BIG_ENDIAN
9683 #define OPTION_EB (OPTION_MD_BASE + 0)
9684 #else
9685 #define OPTION_EL (OPTION_MD_BASE + 1)
9686 #endif
9687 #endif
9688
9689 struct option md_longopts[] = {
9690 #ifdef OPTION_EB
9691 {"EB", no_argument, NULL, OPTION_EB},
9692 #endif
9693 #ifdef OPTION_EL
9694 {"EL", no_argument, NULL, OPTION_EL},
9695 #endif
9696 {NULL, no_argument, NULL, 0}
9697 };
9698
9699 size_t md_longopts_size = sizeof (md_longopts);
9700
9701 struct aarch64_option_table
9702 {
9703 const char *option; /* Option name to match. */
9704 const char *help; /* Help information. */
9705 int *var; /* Variable to change. */
9706 int value; /* What to change it to. */
9707 char *deprecated; /* If non-null, print this message. */
9708 };
9709
/* Simple flag options handled by md_parse_option's exact-match loop.
   Terminated by a NULL sentinel.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9723
/* Description of a named CPU and the feature set it implies.  */
struct aarch64_cpu_option_table
{
  const char *name;		/* Name accepted by -mcpu=/.cpu.  */
  const aarch64_feature_set value;	/* Features enabled by this CPU.  */
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
9732
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Terminated by a NULL sentinel; the first "all"
   entry is deliberately skipped by the .cpu directive handler.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9892
/* Description of a named architecture and the feature set it implies.  */
struct aarch64_arch_option_table
{
  const char *name;		/* Name accepted by -march=/.arch.  */
  const aarch64_feature_set value;	/* Features of that architecture.  */
};
9898
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Terminated by a NULL sentinel; the first "all"
   entry is deliberately skipped by the .arch directive handler.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
9919
/* ISA extensions.  */

/* Description of a "+ext"/"+noext" architectural extension: VALUE is the
   feature bit(s) the extension itself provides; REQUIRE is the set of
   features it depends on (pulled in when enabling, and used to cascade
   removals when disabling).  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require;	/* Feature dependencies.  */
};
9927
/* Extension table, terminated by a NULL sentinel.  NOTE: order matters
   here — aarch64_parse_features matches names with strncmp over the
   user-supplied length, so an entry that is a prefix of a later one
   (e.g. "sve2" before "sve2-sm4", "sme" before "sme-f64") must appear
   first to win the match.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
   AARCH64_ARCH_NONE},
  {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
   AARCH64_ARCH_NONE},
  {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
   AARCH64_ARCH_NONE},
  {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
   AARCH64_ARCH_NONE},
  {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
   AARCH64_ARCH_NONE},
  {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
   AARCH64_ARCH_NONE},
  {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP
		    | AARCH64_FEATURE_F16, 0)},
  {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
   AARCH64_ARCH_NONE},
  {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_F16
		    | AARCH64_FEATURE_SIMD
		    | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
   AARCH64_ARCH_NONE},
  {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_F16
		    | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
   AARCH64_ARCH_NONE},
  {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
   AARCH64_ARCH_NONE},
  {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
   AARCH64_ARCH_NONE},
  {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
   AARCH64_ARCH_NONE},
  {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
   AARCH64_ARCH_NONE},
  {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
   AARCH64_ARCH_NONE},
  {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
   AARCH64_ARCH_NONE},
  {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
   AARCH64_ARCH_NONE},
  {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
   AARCH64_ARCH_NONE},
  {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
   AARCH64_ARCH_NONE},
  {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme", AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64", AARCH64_FEATURE (AARCH64_FEATURE_SME_F64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME
		    | AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-i64", AARCH64_FEATURE (AARCH64_FEATURE_SME_I64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME
		    | AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"bf16", AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
   AARCH64_ARCH_NONE},
  {"i8mm", AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
   AARCH64_ARCH_NONE},
  {"f32mm", AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm", AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64", AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
   AARCH64_ARCH_NONE},
  {"flagm", AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
   AARCH64_ARCH_NONE},
  {"pauth", AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
   AARCH64_ARCH_NONE},
  {"mops", AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
   AARCH64_ARCH_NONE},
  {"hbc", AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
   AARCH64_ARCH_NONE},
  {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
10029
/* Description of a long "-mxxx=yyy" option: OPTION is the "mxxx="
   prefix (matched as a substring) and FUNC parses the text after it.  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
10037
10038 /* Transitive closure of features depending on set. */
10039 static aarch64_feature_set
10040 aarch64_feature_disable_set (aarch64_feature_set set)
10041 {
10042 const struct aarch64_option_cpu_value_table *opt;
10043 aarch64_feature_set prev = 0;
10044
10045 while (prev != set) {
10046 prev = set;
10047 for (opt = aarch64_features; opt->name != NULL; opt++)
10048 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10049 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10050 }
10051 return set;
10052 }
10053
10054 /* Transitive closure of dependencies of set. */
10055 static aarch64_feature_set
10056 aarch64_feature_enable_set (aarch64_feature_set set)
10057 {
10058 const struct aarch64_option_cpu_value_table *opt;
10059 aarch64_feature_set prev = 0;
10060
10061 while (prev != set) {
10062 prev = set;
10063 for (opt = aarch64_features; opt->name != NULL; opt++)
10064 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10065 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10066 }
10067 return set;
10068 }
10069
10070 static int
10071 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10072 bool ext_only)
10073 {
10074 /* We insist on extensions being added before being removed. We achieve
10075 this by using the ADDING_VALUE variable to indicate whether we are
10076 adding an extension (1) or removing it (0) and only allowing it to
10077 change in the order -1 -> 1 -> 0. */
10078 int adding_value = -1;
10079 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10080
10081 /* Copy the feature set, so that we can modify it. */
10082 *ext_set = **opt_p;
10083 *opt_p = ext_set;
10084
10085 while (str != NULL && *str != 0)
10086 {
10087 const struct aarch64_option_cpu_value_table *opt;
10088 const char *ext = NULL;
10089 int optlen;
10090
10091 if (!ext_only)
10092 {
10093 if (*str != '+')
10094 {
10095 as_bad (_("invalid architectural extension"));
10096 return 0;
10097 }
10098
10099 ext = strchr (++str, '+');
10100 }
10101
10102 if (ext != NULL)
10103 optlen = ext - str;
10104 else
10105 optlen = strlen (str);
10106
10107 if (optlen >= 2 && startswith (str, "no"))
10108 {
10109 if (adding_value != 0)
10110 adding_value = 0;
10111 optlen -= 2;
10112 str += 2;
10113 }
10114 else if (optlen > 0)
10115 {
10116 if (adding_value == -1)
10117 adding_value = 1;
10118 else if (adding_value != 1)
10119 {
10120 as_bad (_("must specify extensions to add before specifying "
10121 "those to remove"));
10122 return false;
10123 }
10124 }
10125
10126 if (optlen == 0)
10127 {
10128 as_bad (_("missing architectural extension"));
10129 return 0;
10130 }
10131
10132 gas_assert (adding_value != -1);
10133
10134 for (opt = aarch64_features; opt->name != NULL; opt++)
10135 if (strncmp (opt->name, str, optlen) == 0)
10136 {
10137 aarch64_feature_set set;
10138
10139 /* Add or remove the extension. */
10140 if (adding_value)
10141 {
10142 set = aarch64_feature_enable_set (opt->value);
10143 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10144 }
10145 else
10146 {
10147 set = aarch64_feature_disable_set (opt->value);
10148 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10149 }
10150 break;
10151 }
10152
10153 if (opt->name == NULL)
10154 {
10155 as_bad (_("unknown architectural extension `%s'"), str);
10156 return 0;
10157 }
10158
10159 str = ext;
10160 };
10161
10162 return 1;
10163 }
10164
10165 static int
10166 aarch64_parse_cpu (const char *str)
10167 {
10168 const struct aarch64_cpu_option_table *opt;
10169 const char *ext = strchr (str, '+');
10170 size_t optlen;
10171
10172 if (ext != NULL)
10173 optlen = ext - str;
10174 else
10175 optlen = strlen (str);
10176
10177 if (optlen == 0)
10178 {
10179 as_bad (_("missing cpu name `%s'"), str);
10180 return 0;
10181 }
10182
10183 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10184 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10185 {
10186 mcpu_cpu_opt = &opt->value;
10187 if (ext != NULL)
10188 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10189
10190 return 1;
10191 }
10192
10193 as_bad (_("unknown cpu `%s'"), str);
10194 return 0;
10195 }
10196
10197 static int
10198 aarch64_parse_arch (const char *str)
10199 {
10200 const struct aarch64_arch_option_table *opt;
10201 const char *ext = strchr (str, '+');
10202 size_t optlen;
10203
10204 if (ext != NULL)
10205 optlen = ext - str;
10206 else
10207 optlen = strlen (str);
10208
10209 if (optlen == 0)
10210 {
10211 as_bad (_("missing architecture name `%s'"), str);
10212 return 0;
10213 }
10214
10215 for (opt = aarch64_archs; opt->name != NULL; opt++)
10216 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10217 {
10218 march_cpu_opt = &opt->value;
10219 if (ext != NULL)
10220 return aarch64_parse_features (ext, &march_cpu_opt, false);
10221
10222 return 1;
10223 }
10224
10225 as_bad (_("unknown architecture `%s'\n"), str);
10226 return 0;
10227 }
10228
/* ABIs.  */

/* Mapping of an -mabi= name to its aarch64_abi_type value.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* Recognized ABI names.  Unlike the other tables this one has no NULL
   sentinel; it is walked with ARRAY_SIZE.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
10240
10241 static int
10242 aarch64_parse_abi (const char *str)
10243 {
10244 unsigned int i;
10245
10246 if (str[0] == '\0')
10247 {
10248 as_bad (_("missing abi name `%s'"), str);
10249 return 0;
10250 }
10251
10252 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10253 if (strcmp (str, aarch64_abis[i].name) == 0)
10254 {
10255 aarch64_abi = aarch64_abis[i].value;
10256 return 1;
10257 }
10258
10259 as_bad (_("unknown abi `%s'\n"), str);
10260 return 0;
10261 }
10262
/* "-mxxx=yyy" options dispatched by prefix from md_parse_option.
   Terminated by a NULL sentinel.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10274
/* Handle a target-specific command-line option.  C is the option
   character (or long-option code) and ARG its argument, if any.
   Return 1 if the option was recognized, 0 otherwise.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try an exact match against the simple flag options.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser, passing the text after the
		 matched "mxxx=" prefix (OPTION includes the leading 'm'
		 that getopt already consumed, hence the "- 1").  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10341
10342 void
10343 md_show_usage (FILE * fp)
10344 {
10345 struct aarch64_option_table *opt;
10346 struct aarch64_long_option_table *lopt;
10347
10348 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10349
10350 for (opt = aarch64_opts; opt->option != NULL; opt++)
10351 if (opt->help != NULL)
10352 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10353
10354 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10355 if (lopt->help != NULL)
10356 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10357
10358 #ifdef OPTION_EB
10359 fprintf (fp, _("\
10360 -EB assemble code for a big-endian cpu\n"));
10361 #endif
10362
10363 #ifdef OPTION_EL
10364 fprintf (fp, _("\
10365 -EL assemble code for a little-endian cpu\n"));
10366 #endif
10367 }
10368
10369 /* Parse a .cpu directive. */
10370
10371 static void
10372 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10373 {
10374 const struct aarch64_cpu_option_table *opt;
10375 char saved_char;
10376 char *name;
10377 char *ext;
10378 size_t optlen;
10379
10380 name = input_line_pointer;
10381 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10382 input_line_pointer++;
10383 saved_char = *input_line_pointer;
10384 *input_line_pointer = 0;
10385
10386 ext = strchr (name, '+');
10387
10388 if (ext != NULL)
10389 optlen = ext - name;
10390 else
10391 optlen = strlen (name);
10392
10393 /* Skip the first "all" entry. */
10394 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10395 if (strlen (opt->name) == optlen
10396 && strncmp (name, opt->name, optlen) == 0)
10397 {
10398 mcpu_cpu_opt = &opt->value;
10399 if (ext != NULL)
10400 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10401 return;
10402
10403 cpu_variant = *mcpu_cpu_opt;
10404
10405 *input_line_pointer = saved_char;
10406 demand_empty_rest_of_line ();
10407 return;
10408 }
10409 as_bad (_("unknown cpu `%s'"), name);
10410 *input_line_pointer = saved_char;
10411 ignore_rest_of_line ();
10412 }
10413
10414
10415 /* Parse a .arch directive. */
10416
10417 static void
10418 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10419 {
10420 const struct aarch64_arch_option_table *opt;
10421 char saved_char;
10422 char *name;
10423 char *ext;
10424 size_t optlen;
10425
10426 name = input_line_pointer;
10427 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10428 input_line_pointer++;
10429 saved_char = *input_line_pointer;
10430 *input_line_pointer = 0;
10431
10432 ext = strchr (name, '+');
10433
10434 if (ext != NULL)
10435 optlen = ext - name;
10436 else
10437 optlen = strlen (name);
10438
10439 /* Skip the first "all" entry. */
10440 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10441 if (strlen (opt->name) == optlen
10442 && strncmp (name, opt->name, optlen) == 0)
10443 {
10444 mcpu_cpu_opt = &opt->value;
10445 if (ext != NULL)
10446 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10447 return;
10448
10449 cpu_variant = *mcpu_cpu_opt;
10450
10451 *input_line_pointer = saved_char;
10452 demand_empty_rest_of_line ();
10453 return;
10454 }
10455
10456 as_bad (_("unknown architecture `%s'\n"), name);
10457 *input_line_pointer = saved_char;
10458 ignore_rest_of_line ();
10459 }
10460
10461 /* Parse a .arch_extension directive. */
10462
10463 static void
10464 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10465 {
10466 char saved_char;
10467 char *ext = input_line_pointer;;
10468
10469 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10470 input_line_pointer++;
10471 saved_char = *input_line_pointer;
10472 *input_line_pointer = 0;
10473
10474 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10475 return;
10476
10477 cpu_variant = *mcpu_cpu_opt;
10478
10479 *input_line_pointer = saved_char;
10480 demand_empty_rest_of_line ();
10481 }
10482
/* Copy symbol information.  Copies the AArch64-specific flag word from
   SRC to DEST; used when one symbol is defined as an alias of another.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10490
10491 #ifdef OBJ_ELF
10492 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10493 This is needed so AArch64 specific st_other values can be independently
10494 specified for an IFUNC resolver (that is called by the dynamic linker)
10495 and the symbol it resolves (aliased to the resolver). In particular,
10496 if a function symbol has special st_other value set via directives,
10497 then attaching an IFUNC resolver to that symbol should not override
10498 the st_other setting. Requiring the directive on the IFUNC resolver
10499 symbol would be unexpected and problematic in C code, where the two
10500 symbols appear as two independent function declarations. */
10501
10502 void
10503 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10504 {
10505 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10506 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10507 /* If size is unset, copy size from src. Because we don't track whether
10508 .size has been used, we can't differentiate .size dest, 0 from the case
10509 where dest's size is unset. */
10510 if (!destelf->size && S_GET_SIZE (dest) == 0)
10511 {
10512 if (srcelf->size)
10513 {
10514 destelf->size = XNEW (expressionS);
10515 *destelf->size = *srcelf->size;
10516 }
10517 S_SET_SIZE (dest, S_GET_SIZE (src));
10518 }
10519 }
10520 #endif