]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
aarch64: [SME] Add SME instructions
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2021 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
106
/* Parsed shape/index information for a vector register operand.  */
struct vector_type_el
{
  /* Element type (NT_b/h/s/d/q; NT_zero/NT_merge for predication
     suffixes /z and /m).  */
  enum vector_el_type type;
  /* Bitmask of NTA_HAS* flags recording which of the fields below were
     actually present in the assembly text.  */
  unsigned char defined;
  /* Number of elements (e.g. 4 for "4s"); 0 when the width is variable
     (SVE) or was omitted.  */
  unsigned width;
  /* Element index for forms like Vn.<T>[index].  */
  int64_t index;
};
114
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
116
/* Relocation/fixup information attached to the instruction being
   assembled.  */
struct reloc
{
  /* BFD relocation code to emit.  */
  bfd_reloc_code_real_type type;
  /* Expression the relocation applies to.  */
  expressionS exp;
  /* Non-zero if the relocation is PC-relative.  */
  int pc_rel;
  /* Operand the relocation is attached to.  */
  enum aarch64_opnd opnd;
  /* FIXUP_F_* flags, e.g. FIXUP_F_HAS_EXPLICIT_SHIFT.  */
  uint32_t flags;
  /* NOTE(review): presumably set when libopcodes must re-encode the
     instruction while applying the fixup — confirm against the fixup
     handling code.  */
  unsigned need_libopcodes_p : 1;
};
126
/* Everything gas knows about the instruction currently being parsed
   and assembled.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
  {
    /* Category of the recorded error; AARCH64_OPDE_NIL means no error
       has been recorded yet.  */
    enum aarch64_operand_error_kind kind;
    /* Optional human-readable message; may be NULL.  */
    const char *error;
  } parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};
144
145 typedef struct aarch64_instruction aarch64_instruction;
146
147 static aarch64_instruction inst;
148
149 static bool parse_operands (char *, const aarch64_opcode *);
150 static bool programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
183 static inline bool
184 error_p (void)
185 {
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188
/* Return the message of the error recorded for the current instruction,
   or NULL if no message was supplied.  */
static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}
194
/* Return the kind of the error recorded for the current instruction;
   AARCH64_OPDE_NIL means no error has been recorded.  */
static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}
200
201 static inline void
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
203 {
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
206 }
207
/* Record ERROR as a recoverable error: parsing may continue with the
   current template despite it.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
213
214 /* Use the DESC field of the corresponding aarch64_operand entry to compose
215 the error message. */
static inline void
set_default_error (void)
{
  /* NULL message: the diagnostic text is composed later from the
     operand's DESC field.  */
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
221
/* Record ERROR as a syntax error, overwriting any earlier error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
/* Record ERROR as a fatal syntax error: no other template will be
   tried for this assembly line.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
240 \f
241 /* Return value for certain parsers when the parsing fails; those parsers
242 return the information of the parsed result, e.g. register number, on
243 success. */
244 #define PARSE_FAIL -1
245
246 /* This is an invalid condition code that means no conditional field is
247 present. */
248 #define COND_ALWAYS 0x10
249
250 typedef struct
251 {
252 const char *template;
253 uint32_t value;
254 } asm_nzcv;
255
256 struct reloc_entry
257 {
258 char *name;
259 bfd_reloc_code_real_type reloc;
260 };
261
262 /* Macros to define the register types and masks for the purpose
263 of parsing. */
264
265 #undef AARCH64_REG_TYPES
266 #define AARCH64_REG_TYPES \
267 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
268 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
269 BASIC_REG_TYPE(SP_32) /* wsp */ \
270 BASIC_REG_TYPE(SP_64) /* sp */ \
271 BASIC_REG_TYPE(Z_32) /* wzr */ \
272 BASIC_REG_TYPE(Z_64) /* xzr */ \
273 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
274 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
275 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
276 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
277 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
278 BASIC_REG_TYPE(VN) /* v[0-31] */ \
279 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
280 BASIC_REG_TYPE(PN) /* p[0-15] */ \
281 BASIC_REG_TYPE(ZA) /* za[0-15] */ \
282 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
283 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
284 /* Typecheck: same, plus SVE registers. */ \
285 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
286 | REG_TYPE(ZN)) \
287 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
288 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
289 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
290 /* Typecheck: same, plus SVE registers. */ \
291 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
292 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
293 | REG_TYPE(ZN)) \
294 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
295 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
296 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
297 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
298 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
299 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
300 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
301 /* Typecheck: any [BHSDQ]P FP. */ \
302 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
303 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
304 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
305 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
306 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
307 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
308 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
309 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
310 be used for SVE instructions, since Zn and Pn are valid symbols \
311 in other contexts. */ \
312 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
313 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
314 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
315 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
316 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
317 | REG_TYPE(ZN) | REG_TYPE(PN)) \
318 /* Any integer register; used for error messages only. */ \
319 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
320 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
321 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
322 /* Pseudo type to mark the end of the enumerator sequence. */ \
323 BASIC_REG_TYPE(MAX)
324
325 #undef BASIC_REG_TYPE
326 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
327 #undef MULTI_REG_TYPE
328 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
329
330 /* Register type enumerators. */
331 typedef enum aarch64_reg_type_
332 {
333 /* A list of REG_TYPE_*. */
334 AARCH64_REG_TYPES
335 } aarch64_reg_type;
336
337 #undef BASIC_REG_TYPE
338 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
339 #undef REG_TYPE
340 #define REG_TYPE(T) (1 << REG_TYPE_##T)
341 #undef MULTI_REG_TYPE
342 #define MULTI_REG_TYPE(T,V) V,
343
344 /* Structure for a hash table entry for a register. */
345 typedef struct
346 {
347 const char *name;
348 unsigned char number;
349 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
350 unsigned char builtin;
351 } reg_entry;
352
353 /* Values indexed by aarch64_reg_type to assist the type checking. */
354 static const unsigned reg_type_masks[] =
355 {
356 AARCH64_REG_TYPES
357 };
358
359 #undef BASIC_REG_TYPE
360 #undef REG_TYPE
361 #undef MULTI_REG_TYPE
362 #undef AARCH64_REG_TYPES
363
/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be kept synchronized with the aarch64_reg_type
   definitions above.  */
367 static const char *
368 get_reg_expected_msg (aarch64_reg_type reg_type)
369 {
370 const char *msg;
371
372 switch (reg_type)
373 {
374 case REG_TYPE_R_32:
375 msg = N_("integer 32-bit register expected");
376 break;
377 case REG_TYPE_R_64:
378 msg = N_("integer 64-bit register expected");
379 break;
380 case REG_TYPE_R_N:
381 msg = N_("integer register expected");
382 break;
383 case REG_TYPE_R64_SP:
384 msg = N_("64-bit integer or SP register expected");
385 break;
386 case REG_TYPE_SVE_BASE:
387 msg = N_("base register expected");
388 break;
389 case REG_TYPE_R_Z:
390 msg = N_("integer or zero register expected");
391 break;
392 case REG_TYPE_SVE_OFFSET:
393 msg = N_("offset register expected");
394 break;
395 case REG_TYPE_R_SP:
396 msg = N_("integer or SP register expected");
397 break;
398 case REG_TYPE_R_Z_SP:
399 msg = N_("integer, zero or SP register expected");
400 break;
401 case REG_TYPE_FP_B:
402 msg = N_("8-bit SIMD scalar register expected");
403 break;
404 case REG_TYPE_FP_H:
405 msg = N_("16-bit SIMD scalar or floating-point half precision "
406 "register expected");
407 break;
408 case REG_TYPE_FP_S:
409 msg = N_("32-bit SIMD scalar or floating-point single precision "
410 "register expected");
411 break;
412 case REG_TYPE_FP_D:
413 msg = N_("64-bit SIMD scalar or floating-point double precision "
414 "register expected");
415 break;
416 case REG_TYPE_FP_Q:
417 msg = N_("128-bit SIMD scalar or floating-point quad precision "
418 "register expected");
419 break;
420 case REG_TYPE_R_Z_BHSDQ_V:
421 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
422 msg = N_("register expected");
423 break;
424 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
425 msg = N_("SIMD scalar or floating-point register expected");
426 break;
427 case REG_TYPE_VN: /* any V reg */
428 msg = N_("vector register expected");
429 break;
430 case REG_TYPE_ZN:
431 msg = N_("SVE vector register expected");
432 break;
433 case REG_TYPE_PN:
434 msg = N_("SVE predicate register expected");
435 break;
436 default:
437 as_fatal (_("invalid register type %d"), reg_type);
438 }
439 return msg;
440 }
441
442 /* Some well known registers that we refer to directly elsewhere. */
443 #define REG_SP 31
444 #define REG_ZR 31
445
446 /* Instructions take 4 bytes in the object file. */
447 #define INSN_SIZE 4
448
449 static htab_t aarch64_ops_hsh;
450 static htab_t aarch64_cond_hsh;
451 static htab_t aarch64_shift_hsh;
452 static htab_t aarch64_sys_regs_hsh;
453 static htab_t aarch64_pstatefield_hsh;
454 static htab_t aarch64_sys_regs_ic_hsh;
455 static htab_t aarch64_sys_regs_dc_hsh;
456 static htab_t aarch64_sys_regs_at_hsh;
457 static htab_t aarch64_sys_regs_tlbi_hsh;
458 static htab_t aarch64_sys_regs_sr_hsh;
459 static htab_t aarch64_reg_hsh;
460 static htab_t aarch64_barrier_opt_hsh;
461 static htab_t aarch64_nzcv_hsh;
462 static htab_t aarch64_pldop_hsh;
463 static htab_t aarch64_hint_opt_hsh;
464
465 /* Stuff needed to resolve the label ambiguity
466 As:
467 ...
468 label: <insn>
469 may differ from:
470 ...
471 label:
472 <insn> */
473
474 static symbolS *last_label_seen;
475
476 /* Literal pool structure. Held on a per-section
477 and per-sub-section basis. */
478
479 #define MAX_LITERAL_POOL_SIZE 1024
480 typedef struct literal_expression
481 {
482 expressionS exp;
483 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
484 LITTLENUM_TYPE * bignum;
485 } literal_expression;
486
487 typedef struct literal_pool
488 {
489 literal_expression literals[MAX_LITERAL_POOL_SIZE];
490 unsigned int next_free_entry;
491 unsigned int id;
492 symbolS *symbol;
493 segT section;
494 subsegT sub_section;
495 int size;
496 struct literal_pool *next;
497 } literal_pool;
498
499 /* Pointer to a linked list of literal pools. */
500 static literal_pool *list_of_pools = NULL;
501 \f
502 /* Pure syntax. */
503
504 /* This array holds the chars that always start a comment. If the
505 pre-processor is disabled, these aren't very useful. */
506 const char comment_chars[] = "";
507
508 /* This array holds the chars that only start a comment at the beginning of
509 a line. If the line seems to have the form '# 123 filename'
510 .line and .file directives will appear in the pre-processed output. */
511 /* Note that input_file.c hand checks for '#' at the beginning of the
512 first line of the input file. This is because the compiler outputs
513 #NO_APP at the beginning of its output. */
514 /* Also note that comments like this one will always work. */
515 const char line_comment_chars[] = "#";
516
517 const char line_separator_chars[] = ";";
518
519 /* Chars that can be used to separate mant
520 from exp in floating point numbers. */
521 const char EXP_CHARS[] = "eE";
522
523 /* Chars that mean this number is a floating point constant. */
524 /* As in 0f12.456 */
525 /* or 0d1.2345e12 */
526
527 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
528
529 /* Prefix character that indicates the start of an immediate value. */
530 #define is_immediate_prefix(C) ((C) == '#')
531
532 /* Separator character handling. */
533
534 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
535
/* If the next character of *STR is C, consume it and return TRUE;
   otherwise leave *STR alone and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
547
548 #define skip_past_comma(str) skip_past_char (str, ',')
549
550 /* Arithmetic expressions (possibly involving symbols). */
551
552 static bool in_aarch64_get_expression = false;
553
554 /* Third argument to aarch64_get_expression. */
555 #define GE_NO_PREFIX false
556 #define GE_OPT_PREFIX true
557
558 /* Fourth argument to aarch64_get_expression. */
559 #define ALLOW_ABSENT false
560 #define REJECT_ABSENT true
561
562 /* Fifth argument to aarch64_get_expression. */
563 #define NORMAL_RESOLUTION false
564
/* Return TRUE if the string pointed to by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.
   If DEFER_RESOLUTION is true, then do not resolve expressions against
   constant symbols.  Necessary if the expression is part of a fixup
   that uses a reloc that must be emitted.  */
574
static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent,
			bool defer_resolution)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression machinery works on input_line_pointer, so
     swap our string in and restore the pointer afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Lets md_operand know that a failing sub-expression came from here,
     so it can mark it O_illegal.  */
  in_aarch64_get_expression = true;
  if (defer_resolution)
    seg = deferred_expression (ep);
  else
    seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#'-prefixed operand must be an immediate expression, so a
	 failure here is unrecoverable; without the prefix, keep any
	 earlier (more specific) error.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
638
639 /* Turn a string in input_line_pointer into a floating point constant
640 of type TYPE, and store the appropriate bytes in *LITP. The number
641 of LITTLENUMS emitted is stored in *SIZEP. An error message is
642 returned, or NULL on OK. */
643
644 const char *
645 md_atof (int type, char *litP, int *sizeP)
646 {
647 return ieee_md_atof (type, litP, sizeP, target_big_endian);
648 }
649
650 /* We handle all bad expressions here, so that we can report the faulty
651 instruction in the error message. */
652 void
653 md_operand (expressionS * exp)
654 {
655 if (in_aarch64_get_expression)
656 exp->X_op = O_illegal;
657 }
658
659 /* Immediate values. */
660
661 /* Errors may be set multiple times during parsing or bit encoding
662 (particularly in the Neon bits), but usually the earliest error which is set
663 will be the most meaningful. Avoid overwriting it with later (cascading)
664 errors by calling this function. */
665
static void
first_error (const char *error)
{
  /* Keep the earliest recorded error — it is usually the most
     meaningful one — and drop later cascading errors.  */
  if (error_p ())
    return;
  set_syntax_error (error);
}
672
673 /* Similar to first_error, but this function accepts formatted error
674 message. */
675 static void
676 first_error_fmt (const char *format, ...)
677 {
678 va_list args;
679 enum
680 { size = 100 };
681 /* N.B. this single buffer will not cause error messages for different
682 instructions to pollute each other; this is because at the end of
683 processing of each assembly line, error message if any will be
684 collected by as_bad. */
685 static char buffer[size];
686
687 if (! error_p ())
688 {
689 int ret ATTRIBUTE_UNUSED;
690 va_start (args, format);
691 ret = vsnprintf (buffer, size, format, args);
692 know (ret <= size - 1 && ret >= 0);
693 va_end (args);
694 set_syntax_error (buffer);
695 }
696 }
697
698 /* Register parsing. */
699
700 /* Generic register parser which is called by other specialized
701 register parsers.
702 CCP points to what should be the beginning of a register name.
703 If it is indeed a valid register name, advance CCP over it and
704 return the reg_entry structure; otherwise return NULL.
705 It does not issue diagnostics. */
706
707 static reg_entry *
708 parse_reg (char **ccp)
709 {
710 char *start = *ccp;
711 char *p;
712 reg_entry *reg;
713
714 #ifdef REGISTER_PREFIX
715 if (*start != REGISTER_PREFIX)
716 return NULL;
717 start++;
718 #endif
719
720 p = start;
721 if (!ISALPHA (*p) || !is_name_beginner (*p))
722 return NULL;
723
724 do
725 p++;
726 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
727
728 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
729
730 if (!reg)
731 return NULL;
732
733 *ccp = p;
734 return reg;
735 }
736
737 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
738 return FALSE. */
739 static bool
740 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
741 {
742 return (reg_type_masks[type] & (1 << reg->type)) != 0;
743 }
744
745 /* Try to parse a base or offset register. Allow SVE base and offset
746 registers if REG_TYPE includes SVE registers. Return the register
747 entry on success, setting *QUALIFIER to the register qualifier.
748 Return null otherwise.
749
750 Note that this function does not issue any diagnostics. */
751
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit GPR forms (including WSP and WZR) use the W qualifier.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit GPR forms (including SP and XZR) use the X qualifier.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* SVE Z registers are only acceptable when REG_TYPE allows them,
	 and must be followed by an explicit ".s" or ".d" element-size
	 suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  /* Only consume the input on success.  */
  *ccp = str;

  return reg;
}
802
803 /* Try to parse a base or offset register. Return the register entry
804 on success, setting *QUALIFIER to the register qualifier. Return null
805 otherwise.
806
807 Note that this function does not issue any diagnostics. */
808
static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept any 32/64-bit integer register, including the SP and ZR
     forms.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
814
815 /* Parse the qualifier of a vector register or vector element of type
816 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
817 succeeds; otherwise return FALSE.
818
819 Accept only one occurrence of:
820 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
821 b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector/predicate registers never take a numeric element count;
     for other registers the count may be absent (plain "b"/"h"/...).  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are only valid for SVE registers or as "1q".  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* A fixed-width vector must total 64 or 128 bits, with "2h" and "4b"
     as the two additional short forms.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return true;
}
900
901 /* *STR contains an SVE zero/merge predication suffix. Parse it into
902 *PARSED_TYPE and point *STR at the end of the suffix. */
903
904 static bool
905 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
906 {
907 char *ptr = *str;
908
909 /* Skip '/'. */
910 gas_assert (*ptr == '/');
911 ptr++;
912 switch (TOLOWER (*ptr))
913 {
914 case 'z':
915 parsed_type->type = NT_zero;
916 break;
917 case 'm':
918 parsed_type->type = NT_merge;
919 break;
920 default:
921 if (*ptr != '\0' && *ptr != ',')
922 first_error_fmt (_("unexpected character `%c' in predication type"),
923 *ptr);
924 else
925 first_error (_("missing predication type"));
926 return false;
927 }
928 parsed_type->width = 0;
929 *str = ptr + 1;
930 return true;
931 }
932
933 /* Parse a register of the type TYPE.
934
935 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
936 name or the parsed register is not of TYPE.
937
938 Otherwise return the register number, and optionally fill in the actual
939 type of the register in *RTYPE when multiple alternatives were given, and
940 return the register shape and element index information in *TYPEINFO.
941
942 IN_REG_LIST should be set with TRUE if the caller is parsing a register
943 list. */
944
static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bool in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start from "nothing parsed": no shape, no index.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE down from the accepted class to the register's actual
     basic type.  */
  type = reg->type;

  /* A '.' suffix gives a vector shape; a '/' suffix on a predicate
     register gives zero/merge predication.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* The index must be a constant expression.  */
      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
			      NORMAL_RESOLUTION);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1069
1070 /* Parse register.
1071
1072 Return the register number on success; return PARSE_FAIL otherwise.
1073
1074 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1075 the register (e.g. NEON double or quad reg when either has been requested).
1076
1077 If this is a NEON vector register with additional type information, fill
1078 in the struct pointed to by VECTYPE (if non-NULL).
1079
1080 This parser does not handle register list. */
1081
1082 static int
1083 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1084 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1085 {
1086 struct vector_type_el atype;
1087 char *str = *ccp;
1088 int reg = parse_typed_reg (&str, type, rtype, &atype,
1089 /*in_reg_list= */ false);
1090
1091 if (reg == PARSE_FAIL)
1092 return PARSE_FAIL;
1093
1094 if (vectype)
1095 *vectype = atype;
1096
1097 *ccp = str;
1098
1099 return reg;
1100 }
1101
1102 static inline bool
1103 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1104 {
1105 return
1106 e1.type == e2.type
1107 && e1.defined == e2.defined
1108 && e1.width == e2.width && e1.index == e2.index;
1109 }
1110
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  /* Initialise before the first iteration.  TYPEINFO_FIRST records the
     shape of the first register; every later register must match it.  */
  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  /* The previously parsed register starts the range.  */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ true);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      /* An index on any element means the whole list is indexed; the
	 index expression itself is parsed after the closing '}'.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* A range like v4-v2 is rejected; ranges must ascend.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Pack each register of the (possibly single-element) range into
	 RET_VAL, 5 bits per register.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Note the comma operator: '-' sets IN_RANGE for the next iteration.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* Parse the optional trailing [index] shared by the whole list.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
				  NORMAL_RESOLUTION);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low 2 bits hold num_of_reg - 1; register numbers start at bit 2.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1272
1273 /* Directives: register aliases. */
1274
1275 static reg_entry *
1276 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1277 {
1278 reg_entry *new;
1279 const char *name;
1280
1281 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1282 {
1283 if (new->builtin)
1284 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1285 str);
1286
1287 /* Only warn about a redefinition if it's not defined as the
1288 same register. */
1289 else if (new->number != number || new->type != type)
1290 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1291
1292 return NULL;
1293 }
1294
1295 name = xstrdup (str);
1296 new = XNEW (reg_entry);
1297
1298 new->name = name;
1299 new->number = number;
1300 new->type = type;
1301 new->builtin = false;
1302
1303 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1304
1305 return new;
1306 }
1307
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.

   NEWNAME points to the alias being defined; P points just past it, at
   what should be " .req <reg>".  Besides the alias as written, an
   all-uppercase and an all-lowercase variant are also registered.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* TRUE: the line looked like .req, so it has been handled.  */
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Skip the uppercase alias if the name was already all-uppercase.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise skip the lowercase alias if it matches the original.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1387
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  A .req in
   directive position is therefore always a syntax error.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1395
/* The .unreq directive deletes an alias which was previously defined
   by .req.  For example:

       my_alias .req r11
       .unreq my_alias

   The all-uppercase and all-lowercase variants created alongside the
   alias are removed as well.  Built-in register names cannot be
   undefined.  */

static void
s_unreq (int a ATTRIBUTE_UNUSED)
{
  char *name;
  char saved_char;

  name = input_line_pointer;

  /* Find the end of the alias name on the input line.  */
  while (*input_line_pointer != 0
	 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
    ++input_line_pointer;

  /* Temporarily NUL-terminate the name in place.  */
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  if (!*name)
    as_bad (_("invalid syntax for .unreq directive"));
  else
    {
      reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);

      if (!reg)
	as_bad (_("unknown register alias '%s'"), name);
      else if (reg->builtin)
	as_warn (_("ignoring attempt to undefine built-in register '%s'"),
		 name);
      else
	{
	  char *p;
	  char *nbuf;

	  str_hash_delete (aarch64_reg_hsh, name);
	  free ((char *) reg->name);
	  free (reg);

	  /* Also locate the all upper case and all lower case versions.
	     Do not complain if we cannot find one or the other as it
	     was probably deleted above.  */

	  nbuf = strdup (name);
	  for (p = nbuf; *p; p++)
	    *p = TOUPPER (*p);
	  reg = str_hash_find (aarch64_reg_hsh, nbuf);
	  if (reg)
	    {
	      str_hash_delete (aarch64_reg_hsh, nbuf);
	      free ((char *) reg->name);
	      free (reg);
	    }

	  for (p = nbuf; *p; p++)
	    *p = TOLOWER (*p);
	  reg = str_hash_find (aarch64_reg_hsh, nbuf);
	  if (reg)
	    {
	      str_hash_delete (aarch64_reg_hsh, nbuf);
	      free ((char *) reg->name);
	      free (reg);
	    }

	  free (nbuf);
	}
    }

  /* Restore the clobbered character and finish the line.  */
  *input_line_pointer = saved_char;
  demand_empty_rest_of_line ();
}
1469
1470 /* Directives: Instruction set selection. */
1471
1472 #ifdef OBJ_ELF
1473 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1474 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1475 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1476 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1477
/* Create a new mapping symbol for the transition to STATE, placed at
   offset VALUE within FRAG.  STATE selects the symbol name: "$d" for
   data, "$x" for instructions (see AAELF64 mapping symbols).  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* A symbol at the same offset supersedes the previous one.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1533
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Emits a "$d" symbol at offset VALUE in FRAG and a STATE symbol
   BYTES further on, marking the padding as data.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* It was also the first symbol of the frag.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1561
1562 static void mapping_state_2 (enum mstate state, int max_chars);
1563
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  Records section alignment for
   instruction output and emits any mapping symbol required by the
   transition from the current state.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1605
1606 /* Same as mapping_state, but MAX_CHARS bytes have already been
1607 allocated. Put the mapping symbol that far back. */
1608
1609 static void
1610 mapping_state_2 (enum mstate state, int max_chars)
1611 {
1612 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1613
1614 if (!SEG_NORMAL (now_seg))
1615 return;
1616
1617 if (mapstate == state)
1618 /* The mapping symbol has already been emitted.
1619 There is nothing else to do. */
1620 return;
1621
1622 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1623 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1624 }
1625 #else
1626 #define mapping_state(x) /* nothing */
1627 #define mapping_state_2(x, y) /* nothing */
1628 #endif
1629
1630 /* Directives: sectioning and alignment. */
1631
/* Handle the .bss directive: switch to the BSS section and mark the
   current mapping state as data.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1641
/* Handle the .even directive: align the output to a 2-byte (2^1)
   boundary and record that alignment for the section.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1653
1654 /* Directives: Literal pools. */
1655
1656 static literal_pool *
1657 find_literal_pool (int size)
1658 {
1659 literal_pool *pool;
1660
1661 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1662 {
1663 if (pool->section == now_seg
1664 && pool->sub_section == now_subseg && pool->size == size)
1665 break;
1666 }
1667
1668 return pool;
1669 }
1670
1671 static literal_pool *
1672 find_or_make_literal_pool (int size)
1673 {
1674 /* Next literal pool ID number. */
1675 static unsigned int latest_pool_num = 1;
1676 literal_pool *pool;
1677
1678 pool = find_literal_pool (size);
1679
1680 if (pool == NULL)
1681 {
1682 /* Create a new pool. */
1683 pool = XNEW (literal_pool);
1684 if (!pool)
1685 return NULL;
1686
1687 /* Currently we always put the literal pool in the current text
1688 section. If we were generating "small" model code where we
1689 knew that all code and initialised data was within 1MB then
1690 we could output literals to mergeable, read-only data
1691 sections. */
1692
1693 pool->next_free_entry = 0;
1694 pool->section = now_seg;
1695 pool->sub_section = now_subseg;
1696 pool->size = size;
1697 pool->next = list_of_pools;
1698 pool->symbol = NULL;
1699
1700 /* Add it to the list. */
1701 list_of_pools = pool;
1702 }
1703
1704 /* New pools, and emptied pools, will have a NULL symbol. */
1705 if (pool->symbol == NULL)
1706 {
1707 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1708 &zero_address_frag, 0);
1709 pool->id = latest_pool_num++;
1710 }
1711
1712 /* Done. */
1713 return pool;
1714 }
1715
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.

   On success, *EXP is rewritten in place into a symbol+offset
   expression that refers to the literal's slot in the pool.
   Identical constants/symbols are shared rather than duplicated.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constant literals match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic literals match on symbol identity and offset.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP to reference the pool slot: pool symbol + byte offset.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1775
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Assign NAME, SEGMENT, VALU and FRAG to the pre-created symbol
   SYMBOLP and append it to the global symbol chain, running the
   usual object-format and target hooks.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the caller may free/modify it.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1826
1827
/* Handle the .ltorg/.pool directive: dump every non-empty literal pool
   (4-, 8- and 16-byte entry sizes) for the current section at the
   current location, then mark the pools as empty.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* align 2..4 gives entry sizes 4, 8 and 16 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* \002 in the name keeps it out of the user's namespace.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Pin the pool's anchor symbol to the current location.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1886
1887 #ifdef OBJ_ELF
1888 /* Forward declarations for functions below, in the MD interface
1889 section. */
1890 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1891 static struct reloc_table_entry * find_reloc_table_entry (char **);
1892
1893 /* Directives: Data. */
1894 /* N.B. the support for relocation suffix in this directive needs to be
1895 implemented properly. */
1896
/* Handle .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8):
   emit a comma-separated list of NBYTES-sized data expressions,
   marking the region as data.  Relocation-suffix syntax (":reloc:")
   is recognised but not yet implemented.  */
static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:reloc:" suffix on the symbol.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		/* Recognised but not supported in data directives yet.  */
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1948
/* Mark symbol that it follows a variant PCS convention, by setting
   the STO_AARCH64_VARIANT_PCS bit in its ELF st_other field.  The
   directive takes a single symbol name; the symbol is created if it
   does not yet exist.  */

static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
1971 #endif /* OBJ_ELF */
1972
/* Output a 32-bit word, but mark as an instruction.  Handles the
   .inst directive: each comma-separated operand must be a constant
   expression and is emitted in instruction byte order (swapped on
   big-endian targets, since AArch64 instructions are always
   little-endian).  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      if (target_big_endian)
	{
	  /* Instructions are stored little-endian regardless of data
	     endianness; pre-swap so emit_expr writes the right bytes.  */
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2025
/* Handle .cfi_b_key_frame: record in the current CFI FDE that return
   addresses in this frame are signed with the B pointer-authentication
   key instead of the default A key.  */
static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2033
2034 #ifdef OBJ_ELF
2035 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2036
2037 static void
2038 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2039 {
2040 expressionS exp;
2041
2042 expression (&exp);
2043 frag_grow (4);
2044 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2045 BFD_RELOC_AARCH64_TLSDESC_ADD);
2046
2047 demand_empty_rest_of_line ();
2048 }
2049
2050 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2051
2052 static void
2053 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2054 {
2055 expressionS exp;
2056
2057 /* Since we're just labelling the code, there's no need to define a
2058 mapping symbol. */
2059 expression (&exp);
2060 /* Make sure there is enough room in this frag for the following
2061 blr. This trick only works if the blr follows immediately after
2062 the .tlsdesc directive. */
2063 frag_grow (4);
2064 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2065 BFD_RELOC_AARCH64_TLSDESC_CALL);
2066
2067 demand_empty_rest_of_line ();
2068 }
2069
2070 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2071
2072 static void
2073 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2074 {
2075 expressionS exp;
2076
2077 expression (&exp);
2078 frag_grow (4);
2079 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2080 BFD_RELOC_AARCH64_TLSDESC_LDR);
2081
2082 demand_empty_rest_of_line ();
2083 }
2084 #endif /* OBJ_ELF */
2085
2086 static void s_aarch64_arch (int);
2087 static void s_aarch64_cpu (int);
2088 static void s_aarch64_arch_extension (int);
2089
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* .pool is an alias for .ltorg: both flush literal pools.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The integer argument is the data size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* 'h'/'b' select the half- and brain-float formats in float_cons.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2123 \f
2124
2125 /* Check whether STR points to a register name followed by a comma or the
2126 end of line; REG_TYPE indicates which register types are checked
2127 against. Return TRUE if STR is such a register name; otherwise return
2128 FALSE. The function does not intend to produce any diagnostics, but since
2129 the register parser aarch64_reg_parse, which is called by this function,
2130 does produce diagnostics, we call clear_error to clear any diagnostics
2131 that may be generated by aarch64_reg_parse.
2132 Also, the function returns FALSE directly if there is any user error
2133 present at the function entry. This prevents the existing diagnostics
2134 state from being spoiled.
2135 The function currently serves parse_constant_immediate and
2136 parse_big_immediate only. */
2137 static bool
2138 reg_name_p (char *str, aarch64_reg_type reg_type)
2139 {
2140 int reg;
2141
2142 /* Prevent the diagnostics state from being spoiled. */
2143 if (error_p ())
2144 return false;
2145
2146 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2147
2148 /* Clear the parsing error that may be set by the reg parser. */
2149 clear_error ();
2150
2151 if (reg == PARSE_FAIL)
2152 return false;
2153
2154 skip_whitespace (str);
2155 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2156 return true;
2157
2158 return false;
2159 }
2160
2161 /* Parser functions used exclusively in instruction operands. */
2162
2163 /* Parse an immediate expression which may not be constant.
2164
2165 To prevent the expression parser from pushing a register name
2166 into the symbol table as an undefined symbol, firstly a check is
2167 done to find out whether STR is a register of type REG_TYPE followed
2168 by a comma or the end of line. Return FALSE if STR is such a string. */
2169
2170 static bool
2171 parse_immediate_expression (char **str, expressionS *exp,
2172 aarch64_reg_type reg_type)
2173 {
2174 if (reg_name_p (*str, reg_type))
2175 {
2176 set_recoverable_error (_("immediate operand required"));
2177 return false;
2178 }
2179
2180 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT,
2181 NORMAL_RESOLUTION);
2182
2183 if (exp->X_op == O_absent)
2184 {
2185 set_fatal_syntax_error (_("missing immediate expression"));
2186 return false;
2187 }
2188
2189 return true;
2190 }
2191
2192 /* Constant immediate-value read function for use in insn parsing.
2193 STR points to the beginning of the immediate (with the optional
2194 leading #); *VAL receives the value. REG_TYPE says which register
2195 names should be treated as registers rather than as symbolic immediates.
2196
2197 Return TRUE on success; otherwise return FALSE. */
2198
2199 static bool
2200 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2201 {
2202 expressionS exp;
2203
2204 if (! parse_immediate_expression (str, &exp, reg_type))
2205 return false;
2206
2207 if (exp.X_op != O_constant)
2208 {
2209 set_syntax_error (_("constant expression required"));
2210 return false;
2211 }
2212
2213 *val = exp.X_add_number;
2214 return true;
2215 }
2216
/* Compress the IEEE single-precision bit pattern IMM into the 8-bit
   AArch64 floating-point immediate encoding: bits [25:19] supply the
   low 7 bits and the sign bit [31] supplies bit 7.  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low_bits = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign_bit = (imm >> 24) & 0x80;	/* b[31] -> b[7].  */

  return sign_bit | low_bits;
}
2223
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.

   Such a single-precision value has the bit pattern

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

   where n, e and each x are either 0 or 1 independently, with
   E == ~ e (bit 30 is the complement of bits 29..25).  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* Expected value of bits 30..25: either 0 1111 1 or 1 0000 0.  */
  uint32_t exp_pattern = ((imm >> 30) & 0x1) == 0
			 ? 0x3e000000 : 0x40000000;

  /* The 19 low-order fraction bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  return (imm & 0x7e000000) == exp_pattern;
}
2256
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* A convertible double has the bit pattern

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t hi = (uint32_t) (imm >> 32);
  uint32_t lo = (uint32_t) imm;
  uint32_t want;

  /* The 29 low-order mantissa bits must be zero, or precision is lost.  */
  if ((lo & 0x1fffffff) != 0)
    return false;

  /* The three '~' bits must be the complement of the E bit.  */
  want = ((hi >> 30) & 0x1) ? 0x40000000 : 0x38000000;
  if ((hi & 0x78000000) != want)
    return false;

  /* Reject exponents that would overflow a float (Eeee_eeee == 1111_1111).  */
  if ((hi & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = ((hi & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((hi << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (lo >> 29));		/* 3 S bits.  */
  return true;
}
2304
2305 /* Return true if we should treat OPERAND as a double-precision
2306 floating-point operand rather than a single-precision one. */
2307 static bool
2308 double_precision_operand_p (const aarch64_opnd_info *operand)
2309 {
2310 /* Check for unsuffixed SVE registers, which are allowed
2311 for LDR and STR but not in instructions that require an
2312 immediate. We get better error messages if we arbitrarily
2313 pick one size, parse the immediate normally, and then
2314 report the match failure in the normal way. */
2315 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2316 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2317 }
2318
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* The leading '#' on the immediate is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* The hex literal encodes a double; it must narrow to a float
	     without losing precision.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* More than 32 bits cannot be a single-precision encoding.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name appeared where an immediate is required.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Parse a decimal FP literal; atof_ieee advances STR past the
	 number on success and returns NULL on failure.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2394
2395 /* Less-generic immediate-value read function with the possibility of loading
2396 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2397 instructions.
2398
2399 To prevent the expression parser from pushing a register name into the
2400 symbol table as an undefined symbol, a check is firstly done to find
2401 out whether STR is a register of type REG_TYPE followed by a comma or
2402 the end of line. Return FALSE if STR is such a register. */
2403
2404 static bool
2405 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2406 {
2407 char *ptr = *str;
2408
2409 if (reg_name_p (ptr, reg_type))
2410 {
2411 set_syntax_error (_("immediate operand required"));
2412 return false;
2413 }
2414
2415 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT,
2416 NORMAL_RESOLUTION);
2417
2418 if (inst.reloc.exp.X_op == O_constant)
2419 *imm = inst.reloc.exp.X_add_number;
2420
2421 *str = ptr;
2422
2423 return true;
2424 }
2425
2426 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2427 if NEED_LIBOPCODES is non-zero, the fixup will need
2428 assistance from the libopcodes. */
2429
2430 static inline void
2431 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2432 const aarch64_opnd_info *operand,
2433 int need_libopcodes_p)
2434 {
2435 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2436 reloc->opnd = operand->type;
2437 if (need_libopcodes_p)
2438 reloc->need_libopcodes_p = 1;
2439 };
2440
2441 /* Return TRUE if the instruction needs to be fixed up later internally by
2442 the GAS; otherwise return FALSE. */
2443
2444 static inline bool
2445 aarch64_gas_internal_fixup_p (void)
2446 {
2447 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2448 }
2449
2450 /* Assign the immediate value to the relevant field in *OPERAND if
2451 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2452 needs an internal fixup in a later stage.
2453 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2454 IMM.VALUE that may get assigned with the constant. */
2455 static inline void
2456 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2457 aarch64_opnd_info *operand,
2458 int addr_off_p,
2459 int need_libopcodes_p,
2460 int skip_p)
2461 {
2462 if (reloc->exp.X_op == O_constant)
2463 {
2464 if (addr_off_p)
2465 operand->addr.offset.imm = reloc->exp.X_add_number;
2466 else
2467 operand->imm.value = reloc->exp.X_add_number;
2468 reloc->type = BFD_RELOC_UNUSED;
2469 }
2470 else
2471 {
2472 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2473 /* Tell libopcodes to ignore this operand or not. This is helpful
2474 when one of the operands needs to be fixed up later but we need
2475 libopcodes to check the other operands. */
2476 operand->skip = skip_p;
2477 }
2478 }
2479
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;		/* Modifier name, without the colons.  */
  int pc_rel;			/* Non-zero if the relocation is PC-relative.  */
  bfd_reloc_code_real_type adr_type;	/* Reloc to use on an ADR insn.  */
  bfd_reloc_code_real_type adrp_type;	/* Reloc to use on an ADRP insn.  */
  bfd_reloc_code_real_type movw_type;	/* Reloc to use on MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;	/* Reloc to use on ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;	/* Reloc to use on LDR/STR offset.  */
  bfd_reloc_code_real_type ld_literal_type; /* Reloc to use on LD literal
						(PREL19) forms.  */
};
2499
/* Table of the recognized relocation modifiers.  Per entry the fields
   are, in order: name, pc_rel, adr_type, adrp_type, movw_type, add_type,
   ldst_type, ld_literal_type; a zero means the modifier is not allowed
   on that class of instruction.  */
static struct reloc_table_entry reloc_table[] =
{
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0, /* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0, /* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0, /* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0, /* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0, /* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0, /* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0, /* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0, /* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0, /* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0, /* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0, /* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3028
3029 /* Given the address of a pointer pointing to the textual name of a
3030 relocation as may appear in assembler source, attempt to find its
3031 details in reloc_table. The pointer will be updated to the character
3032 after the trailing colon. On failure, NULL will be returned;
3033 otherwise return the reloc_table_entry. */
3034
3035 static struct reloc_table_entry *
3036 find_reloc_table_entry (char **str)
3037 {
3038 unsigned int i;
3039 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3040 {
3041 int length = strlen (reloc_table[i].name);
3042
3043 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3044 && (*str)[length] == ':')
3045 {
3046 *str += (length + 1);
3047 return &reloc_table[i];
3048 }
3049 }
3050
3051 return NULL;
3052 }
3053
/* Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK.  TYPE is a BFD_RELOC_AARCH64_* code.  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* Neither mandatory nor forbidden; let the caller decide.  */
      return -1;
    }
}
3151
3152 int
3153 aarch64_force_relocation (struct fix *fixp)
3154 {
3155 int res = aarch64_force_reloc (fixp->fx_r_type);
3156
3157 if (res == -1)
3158 return generic_force_reloc (fixp);
3159 return res;
3160 }
3161
/* Mode argument to parse_shift and parser_shifter_operand.  Selects
   which shift/extend operators are acceptable in the current parsing
   context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3176
/* Parse a <shift> operator on an AArch64 data processing instruction.
   *STR points at the operator name; on success *STR is advanced past the
   whole shift (operator and optional amount) and OPERAND's shifter
   fields are filled in.  MODE restricts which operators are acceptable.
   Return TRUE on success; otherwise return FALSE.  */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan the alphabetic operator name.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the name up in the shift-operator hash table.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only ever valid in the LSL|MSL context.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  /* MUL is only valid in the MUL and MUL VL contexts.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Check the operator against what MODE permits.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    /* Closing bracket or MUL VL: there is no amount to parse.  */
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      (void) aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT,
				     NORMAL_RESOLUTION);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* A missing amount is only acceptable for an extend operator that
	 was written without a '#' prefix.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3350
3351 /* Parse a <shifter_operand> for a data processing instruction:
3352
3353 #<immediate>
3354 #<immediate>, LSL #imm
3355
3356 Validation of immediate operands is deferred to md_apply_fix.
3357
3358 Return TRUE on success; otherwise return FALSE. */
3359
3360 static bool
3361 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3362 enum parse_shift_mode mode)
3363 {
3364 char *p;
3365
3366 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3367 return false;
3368
3369 p = *str;
3370
3371 /* Accept an immediate expression. */
3372 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
3373 REJECT_ABSENT, NORMAL_RESOLUTION))
3374 return false;
3375
3376 /* Accept optional LSL for arithmetic immediate values. */
3377 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3378 if (! parse_shift (&p, operand, SHIFTED_LSL))
3379 return false;
3380
3381 /* Not accept any shifter for logical immediate values. */
3382 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3383 && parse_shift (&p, operand, mode))
3384 {
3385 set_syntax_error (_("unexpected shift operator"));
3386 return false;
3387 }
3388
3389 *str = p;
3390 return true;
3391 }
3392
3393 /* Parse a <shifter_operand> for a data processing instruction:
3394
3395 <Rm>
3396 <Rm>, <shift>
3397 #<immediate>
3398 #<immediate>, LSL #imm
3399
3400 where <shift> is handled by parse_shift above, and the last two
3401 cases are handled by the function above.
3402
3403 Validation of immediate operands is deferred to md_apply_fix.
3404
3405 Return TRUE on success; otherwise return FALSE. */
3406
static bool
parse_shifter_operand (char **str, aarch64_opnd_info *operand,
		       enum parse_shift_mode mode)
{
  const reg_entry *reg;
  aarch64_opnd_qualifier_t qualifier;
  enum aarch64_operand_class opd_class
    = aarch64_get_operand_class (operand->type);

  /* First try the register form <Rm>{, <shift>}.  */
  reg = aarch64_reg_parse_32_64 (str, &qualifier);
  if (reg)
    {
      /* A register is never valid where only an immediate is allowed.  */
      if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
	{
	  set_syntax_error (_("unexpected register in the immediate operand"));
	  return false;
	}

      if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
	{
	  set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
	  return false;
	}

      operand->reg.regno = reg->number;
      operand->qualifier = qualifier;

      /* Accept optional shift operation on register.  */
      if (! skip_past_comma (str))
	return true;

      if (! parse_shift (str, operand, mode))
	return false;

      return true;
    }
  else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
    {
      /* A modified-register operand requires a register; reaching here
	 means none could be parsed.  */
      set_syntax_error
	(_("integer register expected in the extended/shifted operand "
	   "register"));
      return false;
    }

  /* We have a shifted immediate variable.  */
  return parse_shifter_operand_imm (str, operand, mode);
}
3454
3455 /* Return TRUE on success; return FALSE otherwise. */
3456
static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip "#:" or ":".  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT,
				    aarch64_force_reloc (entry->add_type) == 1))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3517
3518 /* Parse all forms of an address expression. Information is written
3519 to *OPERAND and/or inst.reloc.
3520
3521 The A64 instruction set has the following addressing modes:
3522
3523 Offset
3524 [base] // in SIMD ld/st structure
3525 [base{,#0}] // in ld/st exclusive
3526 [base{,#imm}]
3527 [base,Xm{,LSL #imm}]
3528 [base,Xm,SXTX {#imm}]
3529 [base,Wm,(S|U)XTW {#imm}]
3530 Pre-indexed
3531 [base]! // in ldraa/ldrab exclusive
3532 [base,#imm]!
3533 Post-indexed
3534 [base],#imm
3535 [base],Xm // in SIMD ld/st structure
3536 PC-relative (literal)
3537 label
3538 SVE:
3539 [base,#imm,MUL VL]
3540 [base,Zm.D{,LSL #imm}]
3541 [base,Zm.S,(S|U)XTW {#imm}]
3542 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3543 [Zn.S,#imm]
3544 [Zn.D,#imm]
3545 [Zn.S{, Xm}]
3546 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3547 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3548 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3549
3550 (As a convenience, the notation "=immediate" is permitted in conjunction
3551 with the pc-relative literal load instructions to automatically place an
3552 immediate value or symbolic address in a nearby literal pool and generate
3553 a hidden label which references it.)
3554
3555 Upon a successful parsing, the address structure in *OPERAND will be
3556 filled in the following way:
3557
3558 .base_regno = <base>
3559 .offset.is_reg // 1 if the offset is a register
3560 .offset.imm = <imm>
3561 .offset.regno = <Rm>
3562
3563 For different addressing modes defined in the A64 ISA:
3564
3565 Offset
3566 .pcrel=0; .preind=1; .postind=0; .writeback=0
3567 Pre-indexed
3568 .pcrel=0; .preind=1; .postind=0; .writeback=1
3569 Post-indexed
3570 .pcrel=0; .preind=0; .postind=1; .writeback=1
3571 PC-relative (literal)
3572 .pcrel=1; .preind=1; .postind=0; .writeback=0
3573
3574 The shift/extension information, if any, will be stored in .shifter.
3575 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3576 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3577 corresponding register.
3578
3579 BASE_TYPE says which types of base register should be accepted and
3580 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3581 is the type of shifter that is allowed for immediate offsets,
3582 or SHIFTED_NONE if none.
3583
3584 In all other respects, it is the caller's responsibility to check
3585 for addressing modes not supported by the instruction, and to set
3586 inst.reloc.type. */
3587
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* Select the relocation variant appropriate for the opcode.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  /* NOTE(review): the force-reloc query below uses ENTRY->add_type
	     even though the recorded type is TY (adr/ld-literal variant) --
	     confirm this is intentional.  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					aarch64_force_reloc (entry->add_type) == 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				       NORMAL_RESOLUTION))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* 64-bit-offset forms: a W register offset is invalid.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* Base and offset element sizes must agree, except for the
		 SVE2 vector-plus-scalar [Zn.S, Xm] form.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* Extending modifiers (e.g. SXTW) require a W offset.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					    aarch64_force_reloc (entry->add_type) == 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					    NORMAL_RESOLUTION))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					 NORMAL_RESOLUTION))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!  */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!  */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is shorthand for [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
3891
3892 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3893 on success. */
3894 static bool
3895 parse_address (char **str, aarch64_opnd_info *operand)
3896 {
3897 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3898 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3899 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3900 }
3901
3902 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3903 The arguments have the same meaning as for parse_address_main.
3904 Return TRUE on success. */
static bool
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  /* SVE addressing allows Z-register bases/offsets and immediate
     offsets scaled by the vector length (MUL VL).  */
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
3914
3915 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3916 Return TRUE on success; otherwise return FALSE. */
static bool
parse_half (char **str, int *internal_fixup_p)
{
  char *p = *str;

  /* The leading '#' is optional.  */
  skip_past_char (&p, '#');

  gas_assert (internal_fixup_p);
  *internal_fixup_p = 0;

  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;

      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->movw_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      inst.reloc.type = entry->movw_type;
    }
  else
    /* No relocation modifier: the fix-up will be handled internally.  */
    *internal_fixup_p = 1;

  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				aarch64_force_reloc (inst.reloc.type) == 1))
    return false;

  *str = p;
  return true;
}
3959
3960 /* Parse an operand for an ADRP instruction:
3961 ADRP <Xd>, <label>
3962 Return TRUE on success; otherwise return FALSE. */
3963
static bool
parse_adrp (char **str)
{
  char *p;

  p = *str;
  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->adrp_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      inst.reloc.type = entry->adrp_type;
    }
  else
    /* Plain label: default ADRP page-relative relocation.  */
    inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;

  /* ADRP is always PC-relative.  */
  inst.reloc.pc_rel = 1;
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				aarch64_force_reloc (inst.reloc.type) == 1))
    return false;
  *str = p;
  return true;
}
4001
4002 /* Miscellaneous. */
4003
4004 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4005 of SIZE tokens in which index I gives the token for field value I,
4006 or is null if field value I is invalid. REG_TYPE says which register
4007 names should be treated as registers rather than as symbolic immediates.
4008
4009 Return true on success, moving *STR past the operand and storing the
4010 field value in *VAL. */
4011
4012 static int
4013 parse_enum_string (char **str, int64_t *val, const char *const *array,
4014 size_t size, aarch64_reg_type reg_type)
4015 {
4016 expressionS exp;
4017 char *p, *q;
4018 size_t i;
4019
4020 /* Match C-like tokens. */
4021 p = q = *str;
4022 while (ISALNUM (*q))
4023 q++;
4024
4025 for (i = 0; i < size; ++i)
4026 if (array[i]
4027 && strncasecmp (array[i], p, q - p) == 0
4028 && array[i][q - p] == 0)
4029 {
4030 *val = i;
4031 *str = q;
4032 return true;
4033 }
4034
4035 if (!parse_immediate_expression (&p, &exp, reg_type))
4036 return false;
4037
4038 if (exp.X_op == O_constant
4039 && (uint64_t) exp.X_add_number < size)
4040 {
4041 *val = exp.X_add_number;
4042 *str = p;
4043 return true;
4044 }
4045
4046 /* Use the default error for this operand. */
4047 return false;
4048 }
4049
4050 /* Parse an option for a preload instruction. Returns the encoding for the
4051 option, or PARSE_FAIL. */
4052
4053 static int
4054 parse_pldop (char **str)
4055 {
4056 char *p, *q;
4057 const struct aarch64_name_value_pair *o;
4058
4059 p = q = *str;
4060 while (ISALNUM (*q))
4061 q++;
4062
4063 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4064 if (!o)
4065 return PARSE_FAIL;
4066
4067 *str = q;
4068 return o->value;
4069 }
4070
4071 /* Parse an option for a barrier instruction. Returns the encoding for the
4072 option, or PARSE_FAIL. */
4073
4074 static int
4075 parse_barrier (char **str)
4076 {
4077 char *p, *q;
4078 const struct aarch64_name_value_pair *o;
4079
4080 p = q = *str;
4081 while (ISALPHA (*q))
4082 q++;
4083
4084 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4085 if (!o)
4086 return PARSE_FAIL;
4087
4088 *str = q;
4089 return o->value;
4090 }
4091
4092 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4093 return 0 if successful. Otherwise return PARSE_FAIL. */
4094
4095 static int
4096 parse_barrier_psb (char **str,
4097 const struct aarch64_name_value_pair ** hint_opt)
4098 {
4099 char *p, *q;
4100 const struct aarch64_name_value_pair *o;
4101
4102 p = q = *str;
4103 while (ISALPHA (*q))
4104 q++;
4105
4106 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4107 if (!o)
4108 {
4109 set_fatal_syntax_error
4110 ( _("unknown or missing option to PSB/TSB"));
4111 return PARSE_FAIL;
4112 }
4113
4114 if (o->value != 0x11)
4115 {
4116 /* PSB only accepts option name 'CSYNC'. */
4117 set_syntax_error
4118 (_("the specified option is not accepted for PSB/TSB"));
4119 return PARSE_FAIL;
4120 }
4121
4122 *str = q;
4123 *hint_opt = o;
4124 return 0;
4125 }
4126
4127 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4128 return 0 if successful. Otherwise return PARSE_FAIL. */
4129
4130 static int
4131 parse_bti_operand (char **str,
4132 const struct aarch64_name_value_pair ** hint_opt)
4133 {
4134 char *p, *q;
4135 const struct aarch64_name_value_pair *o;
4136
4137 p = q = *str;
4138 while (ISALPHA (*q))
4139 q++;
4140
4141 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4142 if (!o)
4143 {
4144 set_fatal_syntax_error
4145 ( _("unknown option to BTI"));
4146 return PARSE_FAIL;
4147 }
4148
4149 switch (o->value)
4150 {
4151 /* Valid BTI operands. */
4152 case HINT_OPD_C:
4153 case HINT_OPD_J:
4154 case HINT_OPD_JC:
4155 break;
4156
4157 default:
4158 set_syntax_error
4159 (_("unknown option to BTI"));
4160 return PARSE_FAIL;
4161 }
4162
4163 *str = q;
4164 *hint_opt = o;
4165 return 0;
4166 }
4167
/* Parse STR for a register of REG_TYPE followed by '.' and a QUALIFIER.
   Return the REG_ENTRY struct and set QUALIFIER to one of [bhsdq], or
   return NULL on failure.  Format:

     REG_TYPE.QUALIFIER

   Side effect: on success, update STR to the current parse position.
*/
4176
static const reg_entry *
parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
		     aarch64_opnd_qualifier_t *qualifier)
{
  char *q;

  reg_entry *reg = parse_reg (str);
  if (reg != NULL && reg->type == reg_type)
    {
      /* The register name must be followed by '.' and a one-letter
	 element size.  */
      if (!skip_past_char (str, '.'))
	{
	  set_syntax_error (_("missing ZA tile element size separator"));
	  return NULL;
	}

      q = *str;
      switch (TOLOWER (*q))
	{
	case 'b':
	  *qualifier = AARCH64_OPND_QLF_S_B;
	  break;
	case 'h':
	  *qualifier = AARCH64_OPND_QLF_S_H;
	  break;
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	case 'q':
	  *qualifier = AARCH64_OPND_QLF_S_Q;
	  break;
	default:
	  /* NOTE(review): *STR has already been advanced past the register
	     name and '.' on this failure path and is not restored -- confirm
	     callers do not rely on the original position.  */
	  return NULL;
	}
      /* Consume the qualifier letter.  */
      q++;

      *str = q;
      return reg;
    }

  return NULL;
}
4221
/* Parse an SME ZA tile encoded in the <ZAda> assembler symbol.
   On success, store the tile qualifier through QUALIFIER and return the
   <ZAda> register number; otherwise return PARSE_FAIL.

   Tiles have the example format: za[0-9]\.[bhsd]
*/
4229 static int
4230 parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
4231 {
4232 int regno;
4233 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZA, qualifier);
4234
4235 if (reg == NULL)
4236 return PARSE_FAIL;
4237 regno = reg->number;
4238
4239 switch (*qualifier)
4240 {
4241 case AARCH64_OPND_QLF_S_B:
4242 if (regno != 0x00)
4243 {
4244 set_syntax_error (_("invalid ZA tile register number, expected za0"));
4245 return PARSE_FAIL;
4246 }
4247 break;
4248 case AARCH64_OPND_QLF_S_H:
4249 if (regno > 0x01)
4250 {
4251 set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
4252 return PARSE_FAIL;
4253 }
4254 break;
4255 case AARCH64_OPND_QLF_S_S:
4256 if (regno > 0x03)
4257 {
4258 /* For the 32-bit variant: is the name of the ZA tile ZA0-ZA3. */
4259 set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
4260 return PARSE_FAIL;
4261 }
4262 break;
4263 case AARCH64_OPND_QLF_S_D:
4264 if (regno > 0x07)
4265 {
4266 /* For the 64-bit variant: is the name of the ZA tile ZA0-ZA7 */
4267 set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
4268 return PARSE_FAIL;
4269 }
4270 break;
4271 default:
4272 set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
4273 return PARSE_FAIL;
4274 }
4275
4276 return regno;
4277 }
4278
4279 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4280 Returns the encoding for the option, or PARSE_FAIL.
4281
4282 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4283 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4284
4285 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4286 field, otherwise as a system register.
4287 */
4288
static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy the name into BUF, lower-cased, stopping at the first character
     that cannot appear in a system register name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the five fields into the encoding used by MSR/MRS.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: diagnose use with an unsupporting processor, but
	 still return the encoding.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4352
4353 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4354 for the option, or NULL. */
4355
4356 static const aarch64_sys_ins_reg *
4357 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4358 {
4359 char *p, *q;
4360 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4361 const aarch64_sys_ins_reg *o;
4362
4363 p = buf;
4364 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4365 if (p < buf + (sizeof (buf) - 1))
4366 *p++ = TOLOWER (*q);
4367 *p = '\0';
4368
4369 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4370 valid system register. This is enforced by construction of the hash
4371 table. */
4372 if (p - buf != q - *str)
4373 return NULL;
4374
4375 o = str_hash_find (sys_ins_regs, buf);
4376 if (!o)
4377 return NULL;
4378
4379 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4380 o->name, o->value, o->flags, 0))
4381 as_bad (_("selected processor does not support system register "
4382 "name '%s'"), buf);
4383 if (aarch64_sys_reg_deprecated_p (o->flags))
4384 as_warn (_("system register name '%s' is deprecated and may be "
4385 "removed in a future release"), buf);
4386
4387 *str = q;
4388 return o;
4389 }
4390 \f
/* Consume character CHR from the input, branching to `failure' if it is
   not present.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of REGTYPE into VAL/RTYPE; report the default error
   and branch to `failure' on failure.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of REG_TYPE into INFO.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic token from ARRAY into VAL (see parse_enum_string).  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; branch to `failure' if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4442 \f
4443 /* encode the 12-bit imm field of Add/sub immediate */
/* Encode the 12-bit immediate field of an add/sub-immediate instruction;
   the value is placed at bit position 10.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  const unsigned imm12_pos = 10;
  return imm << imm12_pos;
}
4449
4450 /* encode the shift amount field of Add/sub immediate */
/* Encode the shift-amount field of an add/sub-immediate instruction;
   the value is placed at bit position 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  const unsigned sh_pos = 22;
  return cnt << sh_pos;
}
4456
4457
4458 /* encode the imm field of Adr instruction */
/* Encode the immediate field of an ADR instruction: the two low bits go
   to immlo and the remaining 19 bits to immhi.  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;		/* [1:0]  -> [30:29].  */
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;	/* [20:2] -> [23:5].  */
  return immlo | immhi;
}
4465
4466 /* encode the immediate field of Move wide immediate */
/* Encode the 16-bit immediate field of a move-wide instruction; the
   value is placed at bit position 5.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  const unsigned imm16_pos = 5;
  return imm << imm16_pos;
}
4472
4473 /* encode the 26-bit offset of unconditional branch */
/* Encode the 26-bit offset of an unconditional branch: keep the low 26
   bits at bit position 0.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t mask26 = (1u << 26) - 1;
  return ofs & mask26;
}
4479
4480 /* encode the 19-bit offset of conditional branch and compare & branch */
/* Encode the 19-bit offset of a conditional branch or compare-and-branch:
   the low 19 bits are placed at bit position 5.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t mask19 = (1u << 19) - 1;
  return (ofs & mask19) << 5;
}
4486
4487 /* encode the 19-bit offset of ld literal */
/* Encode the 19-bit offset of a load-literal instruction: the low 19
   bits are placed at bit position 5.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t mask19 = (1u << 19) - 1;
  return (ofs & mask19) << 5;
}
4493
4494 /* Encode the 14-bit offset of test & branch. */
/* Encode the 14-bit offset of a test-and-branch instruction: the low 14
   bits are placed at bit position 5.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t mask14 = (1u << 14) - 1;
  return (ofs & mask14) << 5;
}
4500
4501 /* Encode the 16-bit imm field of svc/hvc/smc. */
/* Encode the 16-bit immediate field of svc/hvc/smc; the value is placed
   at bit position 5.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  const unsigned imm16_pos = 5;
  return imm << imm16_pos;
}
4507
4508 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling bit 30 of
   the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
4514
/* Reencode a MOVZ/MOVN opcode as MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode | movz_bit;
}
4520
/* Reencode a MOVZ/MOVN opcode as MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode & ~movz_bit;
}
4526
4527 /* Overall per-instruction processing. */
4528
4529 /* We need to be able to fix up arbitrary expressions in some statements.
4530 This is so that we can handle symbols that are an arbitrary distance from
4531 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4532 which returns part of an address in a form which will be valid for
4533 a data instruction. We do this by pushing the expression into a symbol
4534 in the expr_section, and creating a fix for that. */
4535
static fixS *
fix_new_aarch64 (fragS * frag,
		 int where,
		 short int size,
		 expressionS * exp,
		 int pc_rel,
		 int reloc)
{
  fixS *new_fix;

  switch (exp->X_op)
    {
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      /* Simple expressions can be fixed up directly.  */
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
      break;

    default:
      /* Arbitrary expressions are pushed into a symbol in the
	 expr_section and the fix is made against that symbol.  */
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
			 pc_rel, reloc);
      break;
    }
  return new_fix;
}
4562 \f
4563 /* Diagnostics on operands errors. */
4564
/* Nonzero => emit a detailed diagnostic for operand errors; cleared by
   the -mno-verbose-error command-line option.  */
static int verbose_error_p = 1;
4568
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Entries are indexed
   by the error kind and so presumably must stay in the same order as
   enum aarch64_operand_error_kind -- TODO(review): confirm.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4584
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enumerators being declared in
     ascending order of severity; these asserts document and check that
     assumed ordering.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4605
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  /* Static buffer: the returned pointer is only valid until the next
     call to this function.  */
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4634
/* Clear *INSTRUCTION and mark its relocation as unused, ready for
   parsing a new assembly line.  */
static void
reset_aarch64_instruction (aarch64_instruction *instruction)
{
  memset (instruction, '\0', sizeof (aarch64_instruction));
  instruction->reloc.type = BFD_RELOC_UNUSED;
}
4641
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error for a single opcode template.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Template the error was found for.  */
  aarch64_operand_error detail;		/* The error details themselves.  */
  struct operand_error_record *next;	/* Singly-linked list link.  */
};

typedef struct operand_error_record operand_error_record;

/* Head and tail of the list of errors recorded for the current line.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
4661
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.
   NOTE(review): this global looks file-local; consider making it static --
   confirm nothing outside this file references it.  */
operand_errors operand_error_report;

/* Free record nodes.  Recycled operand_error_records are kept here so
   later assembly lines can reuse them instead of allocating.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4673
4674 /* Initialize the data structure that stores the operand mismatch
4675 information on assembling one line of the assembly code. */
4676 static void
4677 init_operand_error_report (void)
4678 {
4679 if (operand_error_report.head != NULL)
4680 {
4681 gas_assert (operand_error_report.tail != NULL);
4682 operand_error_report.tail->next = free_opnd_error_record_nodes;
4683 free_opnd_error_record_nodes = operand_error_report.head;
4684 operand_error_report.head = NULL;
4685 operand_error_report.tail = NULL;
4686 return;
4687 }
4688 gas_assert (operand_error_report.tail == NULL);
4689 }
4690
4691 /* Return TRUE if some operand error has been recorded during the
4692 parsing of the current assembly line using the opcode *OPCODE;
4693 otherwise return FALSE. */
4694 static inline bool
4695 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4696 {
4697 operand_error_record *record = operand_error_report.head;
4698 return record && record->opcode == opcode;
4699 }
4700
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Reuse a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Initialize (or overwrite) the details of the record for OPCODE.  */
  record->detail = new_record->detail;
}
4752
4753 static inline void
4754 record_operand_error_info (const aarch64_opcode *opcode,
4755 aarch64_operand_error *error_info)
4756 {
4757 operand_error_record record;
4758 record.opcode = opcode;
4759 record.detail = *error_info;
4760 add_operand_error_record (&record);
4761 }
4762
4763 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4764 error message *ERROR, for operand IDX (count from 0). */
4765
4766 static void
4767 record_operand_error (const aarch64_opcode *opcode, int idx,
4768 enum aarch64_operand_error_kind kind,
4769 const char* error)
4770 {
4771 aarch64_operand_error info;
4772 memset(&info, 0, sizeof (info));
4773 info.index = idx;
4774 info.kind = kind;
4775 info.error = error;
4776 info.non_fatal = false;
4777 record_operand_error_info (opcode, &info);
4778 }
4779
4780 static void
4781 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4782 enum aarch64_operand_error_kind kind,
4783 const char* error, const int *extra_data)
4784 {
4785 aarch64_operand_error info;
4786 info.index = idx;
4787 info.kind = kind;
4788 info.error = error;
4789 info.data[0] = extra_data[0];
4790 info.data[1] = extra_data[1];
4791 info.data[2] = extra_data[2];
4792 info.non_fatal = false;
4793 record_operand_error_info (opcode, &info);
4794 }
4795
4796 static void
4797 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4798 const char* error, int lower_bound,
4799 int upper_bound)
4800 {
4801 int data[3] = {lower_bound, upper_bound, 0};
4802 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4803 error, data);
4804 }
4805
/* Remove the operand error record for *OPCODE.  */
static void ATTRIBUTE_UNUSED
remove_operand_error_record (const aarch64_opcode *opcode)
{
  if (opcode_has_operand_error_p (opcode))
    {
      /* The record for OPCODE is at the list head (see
	 add_operand_error_record); unlink it and return it to the
	 free list.  */
      operand_error_record* record = operand_error_report.head;
      gas_assert (record != NULL && operand_error_report.tail != NULL);
      operand_error_report.head = record->next;
      record->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = record;
      if (operand_error_report.head == NULL)
	{
	  /* The list is now empty; clear the tail as well.  */
	  gas_assert (operand_error_report.tail == record);
	  operand_error_report.tail = NULL;
	}
    }
}
4824
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many qualifiers in this sequence agree with the
	 qualifiers already assigned to *INSTR's operands.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Strictly greater-than, so the first of equally good matches
	 wins.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
4874
4875 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4876 corresponding operands in *INSTR. */
4877
4878 static inline void
4879 assign_qualifier_sequence (aarch64_inst *instr,
4880 const aarch64_opnd_qualifier_t *qualifiers)
4881 {
4882 int i = 0;
4883 int num_opnds = aarch64_num_of_operands (instr->opcode);
4884 gas_assert (num_opnds);
4885 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4886 instr->operands[i].qualifier = *qualifiers;
4887 }
4888
/* Print operands for the diagnosis purpose.  The textual form of each
   operand of OPCODE/OPNDS is appended to BUF, separated by ", ".  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cpu_variant);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.
	 NOTE(review): BUF is not bounds-checked here; the callers in this
	 file pass a 2048-byte buffer -- confirm that stays sufficient.  */
      strcat (buf, str);
    }
}
4922
/* Send to stderr a string as information.  The message is prefixed with
   "Info: " and the current file/line position when known; unlike as_bad,
   this does not mark the assembly as failed.  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  /* Prefix with the current source position, when available.  */
  file = as_where (&line);
  if (file)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4946
/* Output one operand error record.  RECORD is the error to report and
   STR the offending assembly line; non-fatal errors are emitted as
   warnings, fatal ones as errors.  For invalid-variant errors this also
   re-parses the line (clobbering the global INST) to print suggested
   corrections.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* Operand code of the erroneous operand, or NIL when the index is
     unknown (negative).  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g.  given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* The encoding is expected to fail here (the line was already
	     diagnosed as an invalid variant); only the IR is wanted.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5123
5124 /* Process and output the error message about the operand mismatching.
5125
5126 When this function is called, the operand error information had
5127 been collected for an assembly line and there will be multiple
5128 errors in the case of multiple instruction templates; output the
5129 error message that most closely describes the problem.
5130
5131 The errors to be printed can be filtered on printing all errors
5132 or only non-fatal errors. This distinction has to be made because
5133 the error buffer may already be filled with fatal errors we don't want to
5134 print due to the different instruction templates. */
5135
5136 static void
5137 output_operand_error_report (char *str, bool non_fatal_only)
5138 {
5139 int largest_error_pos;
5140 const char *msg = NULL;
5141 enum aarch64_operand_error_kind kind;
5142 operand_error_record *curr;
5143 operand_error_record *head = operand_error_report.head;
5144 operand_error_record *record = NULL;
5145
5146 /* No error to report. */
5147 if (head == NULL)
5148 return;
5149
5150 gas_assert (head != NULL && operand_error_report.tail != NULL);
5151
5152 /* Only one error. */
5153 if (head == operand_error_report.tail)
5154 {
5155 /* If the only error is a non-fatal one and we don't want to print it,
5156 just exit. */
5157 if (!non_fatal_only || head->detail.non_fatal)
5158 {
5159 DEBUG_TRACE ("single opcode entry with error kind: %s",
5160 operand_mismatch_kind_names[head->detail.kind]);
5161 output_operand_error_record (head, str);
5162 }
5163 return;
5164 }
5165
5166 /* Find the error kind of the highest severity. */
5167 DEBUG_TRACE ("multiple opcode entries with error kind");
5168 kind = AARCH64_OPDE_NIL;
5169 for (curr = head; curr != NULL; curr = curr->next)
5170 {
5171 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
5172 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
5173 if (operand_error_higher_severity_p (curr->detail.kind, kind)
5174 && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
5175 kind = curr->detail.kind;
5176 }
5177
5178 gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
5179
5180 /* Pick up one of errors of KIND to report. */
5181 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
5182 for (curr = head; curr != NULL; curr = curr->next)
5183 {
5184 /* If we don't want to print non-fatal errors then don't consider them
5185 at all. */
5186 if (curr->detail.kind != kind
5187 || (non_fatal_only && !curr->detail.non_fatal))
5188 continue;
5189 /* If there are multiple errors, pick up the one with the highest
5190 mismatching operand index. In the case of multiple errors with
5191 the equally highest operand index, pick up the first one or the
5192 first one with non-NULL error message. */
5193 if (curr->detail.index > largest_error_pos
5194 || (curr->detail.index == largest_error_pos && msg == NULL
5195 && curr->detail.error != NULL))
5196 {
5197 largest_error_pos = curr->detail.index;
5198 record = curr;
5199 msg = record->detail.error;
5200 }
5201 }
5202
5203 /* The way errors are collected in the back-end is a bit non-intuitive. But
5204 essentially, because each operand template is tried recursively you may
5205 always have errors collected from the previous tried OPND. These are
5206 usually skipped if there is one successful match. However now with the
5207 non-fatal errors we have to ignore those previously collected hard errors
5208 when we're only interested in printing the non-fatal ones. This condition
5209 prevents us from printing errors that are not appropriate, since we did
5210 match a condition, but it also has warnings that it wants to print. */
5211 if (non_fatal_only && !record)
5212 return;
5213
5214 gas_assert (largest_error_pos != -2 && record != NULL);
5215 DEBUG_TRACE ("Pick up error kind %s to report",
5216 operand_mismatch_kind_names[record->detail.kind]);
5217
5218 /* Output. */
5219 output_operand_error_record (record, str);
5220 }
5221 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *out = (unsigned char *) buf;
  int i;

  /* Emit the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    out[i] = (insn >> (8 * i)) & 0xff;
}
5232
/* Read a 32-bit AArch64 instruction from BUF, which always holds it in
   little-endian byte order.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *in = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Fold the four bytes in, most-significant first.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | in[i];

  return insn;
}
5242
/* Emit the encoded instruction held in the global INST to the current
   frag, creating a relocation fix-up for it when inst.reloc says one is
   needed.  NEW_INST, if non-NULL, is recorded on the fix-up so it is
   available when the fix is applied later.  */
static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room for one instruction in the current frag.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups also carry the operand and flags so the
	     back-end can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Record the instruction for DWARF line-number information.  */
  dwarf2_emit_insn (INSN_SIZE);
}
5276
/* Link together opcodes of the same name.  */

struct templates
{
  const aarch64_opcode *opcode;	/* One opcode entry for the mnemonic.  */
  struct templates *next;	/* Next template sharing the mnemonic.  */
};

typedef struct templates templates;
5286
5287 static templates *
5288 lookup_mnemonic (const char *start, int len)
5289 {
5290 templates *templ = NULL;
5291
5292 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5293 return templ;
5294 }
5295
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.  On success *STR is advanced past the
   mnemonic (and any condition suffix); on failure 0/NULL is returned.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  Remember the first '.', which may introduce
     a condition suffix.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Reject an empty mnemonic, or one that begins with '.'.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown condition name: leave *STR at the '.' so the caller
	     can point a diagnostic at it.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* The length limit keeps BASE plus the 2-byte suffix within the
	 16-byte CONDNAME buffer.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5359
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL
   (after recording an error via first_error) on failure.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the final qualifier is an
     offset from this base determined by the vector width.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
  };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Zeroing / merging predicate qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.
	 NOTE(review): relies on AARCH64_OPND_QLF_S_B..S_Q being laid out
	 in the same order as NT_b..NT_q -- confirm against the enum.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only total register sizes of 4, 8 or 16 bytes are accepted.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5434
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   The default value comes from the opcode table; which member of *OPERAND
   receives it depends on the operand type.  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Register-lane operands: the default is the lane register number.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      /* An omitted scaled pattern defaults to "MUL #1".  */
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* The default indexes into the barrier-option table.  */
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      /* The default indexes into the hint-option table.  */
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5533
/* Process the relocation type for move wide instructions.
   Validates the relocation in inst.reloc against the instruction (MOVK
   restrictions, 32-bit register restrictions) and sets the implied shift
   amount on operand 1.
   Return TRUE on success; otherwise return FALSE.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not use the signed or PC-relative group relocations.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Derive the implicit LSL amount from the relocation's group number.  */
  switch (inst.reloc.type)
    {
    /* G0 relocations: bits [15:0], no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1 relocations: bits [31:16], shift 16.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2 relocations: bits [47:32], shift 32 -- 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* G3 relocations: bits [63:48], shift 48 -- 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
5635
/* A primitive log2 calculator for transfer sizes 1, 2, 4, 8 and 16
   bytes.  SIZE must be one of those values; any other value is an
   internal error.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* ls[n - 1] is log2 (n) for the supported power-of-two sizes;
     (unsigned char) -1 marks the sizes that are not valid element
     sizes.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Also reject SIZE == 0: the previous check only caught values above
     16, so a zero size would have read ls[-1], which is undefined
     behaviour.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char) -1);
  return ls[size - 1];
}
5651
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   Each pseudo reloc type stands for a family of concrete LDST
   relocations that differ only in the transfer size of the
   instruction; this routine selects the family member matching the
   size implied by the qualifier of operand 1.  Returns
   BFD_RELOC_AARCH64_NONE when the qualifier implies a transfer size
   the family does not support.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: one per pseudo reloc type, in the same order as the pseudo
     types are declared in reloc.c (the index arithmetic at the end of
     this function relies on that order).  Columns: indexed by log2 of
     the transfer size, i.e. 8/16/32/64/128-bit accesses.  The
     BFD_RELOC_AARCH64_NONE entries mark sizes a family lacks.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  /* Only the five pseudo reloc types handled by the table above may
     reach this point.  */
  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand carried no explicit qualifier, deduce the
     expected one from operand 0's qualifier via the opcode's
     qualifier list.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS DTPREL/TPREL families have no 128-bit member (see the
     BFD_RELOC_AARCH64_NONE column entries), so cap the accepted log2
     size at 3 (64-bit) for them.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5739
/* Check whether a register list REGINFO is valid.  The registers must be
   numbered in increasing order (modulo 32), in increments of one or two.

   If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
   increments of two.

   REGINFO packs the list as: bits [1:0] hold the register count minus
   one, followed by one 5-bit register number per list element starting
   at bit 2.

   Return FALSE if such a register list is invalid, otherwise return TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t count = (reginfo & 0x3) + 1;
  uint32_t step = accept_alternate ? 2 : 1;
  uint32_t bits = reginfo >> 2;
  /* Start from the first register number; each later element must be
     exactly STEP higher, wrapping modulo 32.  */
  uint32_t expected = bits & 0x1f;

  while (--count > 0)
    {
      bits >>= 5;
      expected = (expected + step) & 0x1f;
      if ((bits & 0x1f) != expected)
	return false;
    }

  return true;
}
5770
5771 /* Generic instruction operand parser. This does no encoding and no
5772 semantic validation; it merely squirrels values away in the inst
5773 structure. Returns TRUE or FALSE depending on whether the
5774 specified grammar matched. */
5775
5776 static bool
5777 parse_operands (char *str, const aarch64_opcode *opcode)
5778 {
5779 int i;
5780 char *backtrack_pos = 0;
5781 const enum aarch64_opnd *operands = opcode->operands;
5782 aarch64_reg_type imm_reg_type;
5783
5784 clear_error ();
5785 skip_whitespace (str);
5786
5787 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5788 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5789 else
5790 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5791
5792 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5793 {
5794 int64_t val;
5795 const reg_entry *reg;
5796 int comma_skipped_p = 0;
5797 aarch64_reg_type rtype;
5798 struct vector_type_el vectype;
5799 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5800 aarch64_opnd_info *info = &inst.base.operands[i];
5801 aarch64_reg_type reg_type;
5802
5803 DEBUG_TRACE ("parse operand %d", i);
5804
5805 /* Assign the operand code. */
5806 info->type = operands[i];
5807
5808 if (optional_operand_p (opcode, i))
5809 {
5810 /* Remember where we are in case we need to backtrack. */
5811 gas_assert (!backtrack_pos);
5812 backtrack_pos = str;
5813 }
5814
5815 /* Expect comma between operands; the backtrack mechanism will take
5816 care of cases of omitted optional operand. */
5817 if (i > 0 && ! skip_past_char (&str, ','))
5818 {
5819 set_syntax_error (_("comma expected between operands"));
5820 goto failure;
5821 }
5822 else
5823 comma_skipped_p = 1;
5824
5825 switch (operands[i])
5826 {
5827 case AARCH64_OPND_Rd:
5828 case AARCH64_OPND_Rn:
5829 case AARCH64_OPND_Rm:
5830 case AARCH64_OPND_Rt:
5831 case AARCH64_OPND_Rt2:
5832 case AARCH64_OPND_Rs:
5833 case AARCH64_OPND_Ra:
5834 case AARCH64_OPND_Rt_LS64:
5835 case AARCH64_OPND_Rt_SYS:
5836 case AARCH64_OPND_PAIRREG:
5837 case AARCH64_OPND_SVE_Rm:
5838 po_int_reg_or_fail (REG_TYPE_R_Z);
5839
5840 /* In LS64 load/store instructions Rt register number must be even
5841 and <=22. */
5842 if (operands[i] == AARCH64_OPND_Rt_LS64)
5843 {
5844 /* We've already checked if this is valid register.
5845 This will check if register number (Rt) is not undefined for LS64
5846 instructions:
5847 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
5848 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
5849 {
5850 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
5851 goto failure;
5852 }
5853 }
5854 break;
5855
5856 case AARCH64_OPND_Rd_SP:
5857 case AARCH64_OPND_Rn_SP:
5858 case AARCH64_OPND_Rt_SP:
5859 case AARCH64_OPND_SVE_Rn_SP:
5860 case AARCH64_OPND_Rm_SP:
5861 po_int_reg_or_fail (REG_TYPE_R_SP);
5862 break;
5863
5864 case AARCH64_OPND_Rm_EXT:
5865 case AARCH64_OPND_Rm_SFT:
5866 po_misc_or_fail (parse_shifter_operand
5867 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5868 ? SHIFTED_ARITH_IMM
5869 : SHIFTED_LOGIC_IMM)));
5870 if (!info->shifter.operator_present)
5871 {
5872 /* Default to LSL if not present. Libopcodes prefers shifter
5873 kind to be explicit. */
5874 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5875 info->shifter.kind = AARCH64_MOD_LSL;
5876 /* For Rm_EXT, libopcodes will carry out further check on whether
5877 or not stack pointer is used in the instruction (Recall that
5878 "the extend operator is not optional unless at least one of
5879 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5880 }
5881 break;
5882
5883 case AARCH64_OPND_Fd:
5884 case AARCH64_OPND_Fn:
5885 case AARCH64_OPND_Fm:
5886 case AARCH64_OPND_Fa:
5887 case AARCH64_OPND_Ft:
5888 case AARCH64_OPND_Ft2:
5889 case AARCH64_OPND_Sd:
5890 case AARCH64_OPND_Sn:
5891 case AARCH64_OPND_Sm:
5892 case AARCH64_OPND_SVE_VZn:
5893 case AARCH64_OPND_SVE_Vd:
5894 case AARCH64_OPND_SVE_Vm:
5895 case AARCH64_OPND_SVE_Vn:
5896 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5897 if (val == PARSE_FAIL)
5898 {
5899 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5900 goto failure;
5901 }
5902 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5903
5904 info->reg.regno = val;
5905 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5906 break;
5907
5908 case AARCH64_OPND_SVE_Pd:
5909 case AARCH64_OPND_SVE_Pg3:
5910 case AARCH64_OPND_SVE_Pg4_5:
5911 case AARCH64_OPND_SVE_Pg4_10:
5912 case AARCH64_OPND_SVE_Pg4_16:
5913 case AARCH64_OPND_SVE_Pm:
5914 case AARCH64_OPND_SVE_Pn:
5915 case AARCH64_OPND_SVE_Pt:
5916 case AARCH64_OPND_SME_Pm:
5917 reg_type = REG_TYPE_PN;
5918 goto vector_reg;
5919
5920 case AARCH64_OPND_SVE_Za_5:
5921 case AARCH64_OPND_SVE_Za_16:
5922 case AARCH64_OPND_SVE_Zd:
5923 case AARCH64_OPND_SVE_Zm_5:
5924 case AARCH64_OPND_SVE_Zm_16:
5925 case AARCH64_OPND_SVE_Zn:
5926 case AARCH64_OPND_SVE_Zt:
5927 reg_type = REG_TYPE_ZN;
5928 goto vector_reg;
5929
5930 case AARCH64_OPND_Va:
5931 case AARCH64_OPND_Vd:
5932 case AARCH64_OPND_Vn:
5933 case AARCH64_OPND_Vm:
5934 reg_type = REG_TYPE_VN;
5935 vector_reg:
5936 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5937 if (val == PARSE_FAIL)
5938 {
5939 first_error (_(get_reg_expected_msg (reg_type)));
5940 goto failure;
5941 }
5942 if (vectype.defined & NTA_HASINDEX)
5943 goto failure;
5944
5945 info->reg.regno = val;
5946 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5947 && vectype.type == NT_invtype)
5948 /* Unqualified Pn and Zn registers are allowed in certain
5949 contexts. Rely on F_STRICT qualifier checking to catch
5950 invalid uses. */
5951 info->qualifier = AARCH64_OPND_QLF_NIL;
5952 else
5953 {
5954 info->qualifier = vectype_to_qualifier (&vectype);
5955 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5956 goto failure;
5957 }
5958 break;
5959
5960 case AARCH64_OPND_VdD1:
5961 case AARCH64_OPND_VnD1:
5962 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5963 if (val == PARSE_FAIL)
5964 {
5965 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5966 goto failure;
5967 }
5968 if (vectype.type != NT_d || vectype.index != 1)
5969 {
5970 set_fatal_syntax_error
5971 (_("the top half of a 128-bit FP/SIMD register is expected"));
5972 goto failure;
5973 }
5974 info->reg.regno = val;
5975 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5976 here; it is correct for the purpose of encoding/decoding since
5977 only the register number is explicitly encoded in the related
5978 instructions, although this appears a bit hacky. */
5979 info->qualifier = AARCH64_OPND_QLF_S_D;
5980 break;
5981
5982 case AARCH64_OPND_SVE_Zm3_INDEX:
5983 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5984 case AARCH64_OPND_SVE_Zm3_11_INDEX:
5985 case AARCH64_OPND_SVE_Zm4_11_INDEX:
5986 case AARCH64_OPND_SVE_Zm4_INDEX:
5987 case AARCH64_OPND_SVE_Zn_INDEX:
5988 reg_type = REG_TYPE_ZN;
5989 goto vector_reg_index;
5990
5991 case AARCH64_OPND_Ed:
5992 case AARCH64_OPND_En:
5993 case AARCH64_OPND_Em:
5994 case AARCH64_OPND_Em16:
5995 case AARCH64_OPND_SM3_IMM2:
5996 reg_type = REG_TYPE_VN;
5997 vector_reg_index:
5998 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5999 if (val == PARSE_FAIL)
6000 {
6001 first_error (_(get_reg_expected_msg (reg_type)));
6002 goto failure;
6003 }
6004 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6005 goto failure;
6006
6007 info->reglane.regno = val;
6008 info->reglane.index = vectype.index;
6009 info->qualifier = vectype_to_qualifier (&vectype);
6010 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6011 goto failure;
6012 break;
6013
6014 case AARCH64_OPND_SVE_ZnxN:
6015 case AARCH64_OPND_SVE_ZtxN:
6016 reg_type = REG_TYPE_ZN;
6017 goto vector_reg_list;
6018
6019 case AARCH64_OPND_LVn:
6020 case AARCH64_OPND_LVt:
6021 case AARCH64_OPND_LVt_AL:
6022 case AARCH64_OPND_LEt:
6023 reg_type = REG_TYPE_VN;
6024 vector_reg_list:
6025 if (reg_type == REG_TYPE_ZN
6026 && get_opcode_dependent_value (opcode) == 1
6027 && *str != '{')
6028 {
6029 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6030 if (val == PARSE_FAIL)
6031 {
6032 first_error (_(get_reg_expected_msg (reg_type)));
6033 goto failure;
6034 }
6035 info->reglist.first_regno = val;
6036 info->reglist.num_regs = 1;
6037 }
6038 else
6039 {
6040 val = parse_vector_reg_list (&str, reg_type, &vectype);
6041 if (val == PARSE_FAIL)
6042 goto failure;
6043
6044 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6045 {
6046 set_fatal_syntax_error (_("invalid register list"));
6047 goto failure;
6048 }
6049
6050 if (vectype.width != 0 && *str != ',')
6051 {
6052 set_fatal_syntax_error
6053 (_("expected element type rather than vector type"));
6054 goto failure;
6055 }
6056
6057 info->reglist.first_regno = (val >> 2) & 0x1f;
6058 info->reglist.num_regs = (val & 0x3) + 1;
6059 }
6060 if (operands[i] == AARCH64_OPND_LEt)
6061 {
6062 if (!(vectype.defined & NTA_HASINDEX))
6063 goto failure;
6064 info->reglist.has_index = 1;
6065 info->reglist.index = vectype.index;
6066 }
6067 else
6068 {
6069 if (vectype.defined & NTA_HASINDEX)
6070 goto failure;
6071 if (!(vectype.defined & NTA_HASTYPE))
6072 {
6073 if (reg_type == REG_TYPE_ZN)
6074 set_fatal_syntax_error (_("missing type suffix"));
6075 goto failure;
6076 }
6077 }
6078 info->qualifier = vectype_to_qualifier (&vectype);
6079 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6080 goto failure;
6081 break;
6082
6083 case AARCH64_OPND_CRn:
6084 case AARCH64_OPND_CRm:
6085 {
6086 char prefix = *(str++);
6087 if (prefix != 'c' && prefix != 'C')
6088 goto failure;
6089
6090 po_imm_nc_or_fail ();
6091 if (val > 15)
6092 {
6093 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6094 goto failure;
6095 }
6096 info->qualifier = AARCH64_OPND_QLF_CR;
6097 info->imm.value = val;
6098 break;
6099 }
6100
6101 case AARCH64_OPND_SHLL_IMM:
6102 case AARCH64_OPND_IMM_VLSR:
6103 po_imm_or_fail (1, 64);
6104 info->imm.value = val;
6105 break;
6106
6107 case AARCH64_OPND_CCMP_IMM:
6108 case AARCH64_OPND_SIMM5:
6109 case AARCH64_OPND_FBITS:
6110 case AARCH64_OPND_TME_UIMM16:
6111 case AARCH64_OPND_UIMM4:
6112 case AARCH64_OPND_UIMM4_ADDG:
6113 case AARCH64_OPND_UIMM10:
6114 case AARCH64_OPND_UIMM3_OP1:
6115 case AARCH64_OPND_UIMM3_OP2:
6116 case AARCH64_OPND_IMM_VLSL:
6117 case AARCH64_OPND_IMM:
6118 case AARCH64_OPND_IMM_2:
6119 case AARCH64_OPND_WIDTH:
6120 case AARCH64_OPND_SVE_INV_LIMM:
6121 case AARCH64_OPND_SVE_LIMM:
6122 case AARCH64_OPND_SVE_LIMM_MOV:
6123 case AARCH64_OPND_SVE_SHLIMM_PRED:
6124 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6125 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6126 case AARCH64_OPND_SVE_SHRIMM_PRED:
6127 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6128 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6129 case AARCH64_OPND_SVE_SIMM5:
6130 case AARCH64_OPND_SVE_SIMM5B:
6131 case AARCH64_OPND_SVE_SIMM6:
6132 case AARCH64_OPND_SVE_SIMM8:
6133 case AARCH64_OPND_SVE_UIMM3:
6134 case AARCH64_OPND_SVE_UIMM7:
6135 case AARCH64_OPND_SVE_UIMM8:
6136 case AARCH64_OPND_SVE_UIMM8_53:
6137 case AARCH64_OPND_IMM_ROT1:
6138 case AARCH64_OPND_IMM_ROT2:
6139 case AARCH64_OPND_IMM_ROT3:
6140 case AARCH64_OPND_SVE_IMM_ROT1:
6141 case AARCH64_OPND_SVE_IMM_ROT2:
6142 case AARCH64_OPND_SVE_IMM_ROT3:
6143 po_imm_nc_or_fail ();
6144 info->imm.value = val;
6145 break;
6146
6147 case AARCH64_OPND_SVE_AIMM:
6148 case AARCH64_OPND_SVE_ASIMM:
6149 po_imm_nc_or_fail ();
6150 info->imm.value = val;
6151 skip_whitespace (str);
6152 if (skip_past_comma (&str))
6153 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6154 else
6155 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6156 break;
6157
6158 case AARCH64_OPND_SVE_PATTERN:
6159 po_enum_or_fail (aarch64_sve_pattern_array);
6160 info->imm.value = val;
6161 break;
6162
6163 case AARCH64_OPND_SVE_PATTERN_SCALED:
6164 po_enum_or_fail (aarch64_sve_pattern_array);
6165 info->imm.value = val;
6166 if (skip_past_comma (&str)
6167 && !parse_shift (&str, info, SHIFTED_MUL))
6168 goto failure;
6169 if (!info->shifter.operator_present)
6170 {
6171 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6172 info->shifter.kind = AARCH64_MOD_MUL;
6173 info->shifter.amount = 1;
6174 }
6175 break;
6176
6177 case AARCH64_OPND_SVE_PRFOP:
6178 po_enum_or_fail (aarch64_sve_prfop_array);
6179 info->imm.value = val;
6180 break;
6181
6182 case AARCH64_OPND_UIMM7:
6183 po_imm_or_fail (0, 127);
6184 info->imm.value = val;
6185 break;
6186
6187 case AARCH64_OPND_IDX:
6188 case AARCH64_OPND_MASK:
6189 case AARCH64_OPND_BIT_NUM:
6190 case AARCH64_OPND_IMMR:
6191 case AARCH64_OPND_IMMS:
6192 po_imm_or_fail (0, 63);
6193 info->imm.value = val;
6194 break;
6195
6196 case AARCH64_OPND_IMM0:
6197 po_imm_nc_or_fail ();
6198 if (val != 0)
6199 {
6200 set_fatal_syntax_error (_("immediate zero expected"));
6201 goto failure;
6202 }
6203 info->imm.value = 0;
6204 break;
6205
6206 case AARCH64_OPND_FPIMM0:
6207 {
6208 int qfloat;
6209 bool res1 = false, res2 = false;
6210 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6211 it is probably not worth the effort to support it. */
6212 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6213 imm_reg_type))
6214 && (error_p ()
6215 || !(res2 = parse_constant_immediate (&str, &val,
6216 imm_reg_type))))
6217 goto failure;
6218 if ((res1 && qfloat == 0) || (res2 && val == 0))
6219 {
6220 info->imm.value = 0;
6221 info->imm.is_fp = 1;
6222 break;
6223 }
6224 set_fatal_syntax_error (_("immediate zero expected"));
6225 goto failure;
6226 }
6227
6228 case AARCH64_OPND_IMM_MOV:
6229 {
6230 char *saved = str;
6231 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6232 reg_name_p (str, REG_TYPE_VN))
6233 goto failure;
6234 str = saved;
6235 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6236 GE_OPT_PREFIX, REJECT_ABSENT,
6237 NORMAL_RESOLUTION));
6238 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6239 later. fix_mov_imm_insn will try to determine a machine
6240 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6241 message if the immediate cannot be moved by a single
6242 instruction. */
6243 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6244 inst.base.operands[i].skip = 1;
6245 }
6246 break;
6247
6248 case AARCH64_OPND_SIMD_IMM:
6249 case AARCH64_OPND_SIMD_IMM_SFT:
6250 if (! parse_big_immediate (&str, &val, imm_reg_type))
6251 goto failure;
6252 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6253 /* addr_off_p */ 0,
6254 /* need_libopcodes_p */ 1,
6255 /* skip_p */ 1);
6256 /* Parse shift.
6257 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6258 shift, we don't check it here; we leave the checking to
6259 the libopcodes (operand_general_constraint_met_p). By
6260 doing this, we achieve better diagnostics. */
6261 if (skip_past_comma (&str)
6262 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6263 goto failure;
6264 if (!info->shifter.operator_present
6265 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6266 {
6267 /* Default to LSL if not present. Libopcodes prefers shifter
6268 kind to be explicit. */
6269 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6270 info->shifter.kind = AARCH64_MOD_LSL;
6271 }
6272 break;
6273
6274 case AARCH64_OPND_FPIMM:
6275 case AARCH64_OPND_SIMD_FPIMM:
6276 case AARCH64_OPND_SVE_FPIMM8:
6277 {
6278 int qfloat;
6279 bool dp_p;
6280
6281 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6282 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6283 || !aarch64_imm_float_p (qfloat))
6284 {
6285 if (!error_p ())
6286 set_fatal_syntax_error (_("invalid floating-point"
6287 " constant"));
6288 goto failure;
6289 }
6290 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6291 inst.base.operands[i].imm.is_fp = 1;
6292 }
6293 break;
6294
6295 case AARCH64_OPND_SVE_I1_HALF_ONE:
6296 case AARCH64_OPND_SVE_I1_HALF_TWO:
6297 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6298 {
6299 int qfloat;
6300 bool dp_p;
6301
6302 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6303 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6304 {
6305 if (!error_p ())
6306 set_fatal_syntax_error (_("invalid floating-point"
6307 " constant"));
6308 goto failure;
6309 }
6310 inst.base.operands[i].imm.value = qfloat;
6311 inst.base.operands[i].imm.is_fp = 1;
6312 }
6313 break;
6314
6315 case AARCH64_OPND_LIMM:
6316 po_misc_or_fail (parse_shifter_operand (&str, info,
6317 SHIFTED_LOGIC_IMM));
6318 if (info->shifter.operator_present)
6319 {
6320 set_fatal_syntax_error
6321 (_("shift not allowed for bitmask immediate"));
6322 goto failure;
6323 }
6324 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6325 /* addr_off_p */ 0,
6326 /* need_libopcodes_p */ 1,
6327 /* skip_p */ 1);
6328 break;
6329
6330 case AARCH64_OPND_AIMM:
6331 if (opcode->op == OP_ADD)
6332 /* ADD may have relocation types. */
6333 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6334 SHIFTED_ARITH_IMM));
6335 else
6336 po_misc_or_fail (parse_shifter_operand (&str, info,
6337 SHIFTED_ARITH_IMM));
6338 switch (inst.reloc.type)
6339 {
6340 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6341 info->shifter.amount = 12;
6342 break;
6343 case BFD_RELOC_UNUSED:
6344 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6345 if (info->shifter.kind != AARCH64_MOD_NONE)
6346 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6347 inst.reloc.pc_rel = 0;
6348 break;
6349 default:
6350 break;
6351 }
6352 info->imm.value = 0;
6353 if (!info->shifter.operator_present)
6354 {
6355 /* Default to LSL if not present. Libopcodes prefers shifter
6356 kind to be explicit. */
6357 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6358 info->shifter.kind = AARCH64_MOD_LSL;
6359 }
6360 break;
6361
6362 case AARCH64_OPND_HALF:
6363 {
6364 /* #<imm16> or relocation. */
6365 int internal_fixup_p;
6366 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6367 if (internal_fixup_p)
6368 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6369 skip_whitespace (str);
6370 if (skip_past_comma (&str))
6371 {
6372 /* {, LSL #<shift>} */
6373 if (! aarch64_gas_internal_fixup_p ())
6374 {
6375 set_fatal_syntax_error (_("can't mix relocation modifier "
6376 "with explicit shift"));
6377 goto failure;
6378 }
6379 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6380 }
6381 else
6382 inst.base.operands[i].shifter.amount = 0;
6383 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6384 inst.base.operands[i].imm.value = 0;
6385 if (! process_movw_reloc_info ())
6386 goto failure;
6387 }
6388 break;
6389
6390 case AARCH64_OPND_EXCEPTION:
6391 case AARCH64_OPND_UNDEFINED:
6392 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6393 imm_reg_type));
6394 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6395 /* addr_off_p */ 0,
6396 /* need_libopcodes_p */ 0,
6397 /* skip_p */ 1);
6398 break;
6399
6400 case AARCH64_OPND_NZCV:
6401 {
6402 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6403 if (nzcv != NULL)
6404 {
6405 str += 4;
6406 info->imm.value = nzcv->value;
6407 break;
6408 }
6409 po_imm_or_fail (0, 15);
6410 info->imm.value = val;
6411 }
6412 break;
6413
6414 case AARCH64_OPND_COND:
6415 case AARCH64_OPND_COND1:
6416 {
6417 char *start = str;
6418 do
6419 str++;
6420 while (ISALPHA (*str));
6421 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6422 if (info->cond == NULL)
6423 {
6424 set_syntax_error (_("invalid condition"));
6425 goto failure;
6426 }
6427 else if (operands[i] == AARCH64_OPND_COND1
6428 && (info->cond->value & 0xe) == 0xe)
6429 {
6430 /* Do not allow AL or NV. */
6431 set_default_error ();
6432 goto failure;
6433 }
6434 }
6435 break;
6436
6437 case AARCH64_OPND_ADDR_ADRP:
6438 po_misc_or_fail (parse_adrp (&str));
6439 /* Clear the value as operand needs to be relocated. */
6440 info->imm.value = 0;
6441 break;
6442
6443 case AARCH64_OPND_ADDR_PCREL14:
6444 case AARCH64_OPND_ADDR_PCREL19:
6445 case AARCH64_OPND_ADDR_PCREL21:
6446 case AARCH64_OPND_ADDR_PCREL26:
6447 po_misc_or_fail (parse_address (&str, info));
6448 if (!info->addr.pcrel)
6449 {
6450 set_syntax_error (_("invalid pc-relative address"));
6451 goto failure;
6452 }
6453 if (inst.gen_lit_pool
6454 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6455 {
6456 /* Only permit "=value" in the literal load instructions.
6457 The literal will be generated by programmer_friendly_fixup. */
6458 set_syntax_error (_("invalid use of \"=immediate\""));
6459 goto failure;
6460 }
6461 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6462 {
6463 set_syntax_error (_("unrecognized relocation suffix"));
6464 goto failure;
6465 }
6466 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6467 {
6468 info->imm.value = inst.reloc.exp.X_add_number;
6469 inst.reloc.type = BFD_RELOC_UNUSED;
6470 }
6471 else
6472 {
6473 info->imm.value = 0;
6474 if (inst.reloc.type == BFD_RELOC_UNUSED)
6475 switch (opcode->iclass)
6476 {
6477 case compbranch:
6478 case condbranch:
6479 /* e.g. CBZ or B.COND */
6480 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6481 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6482 break;
6483 case testbranch:
6484 /* e.g. TBZ */
6485 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6486 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6487 break;
6488 case branch_imm:
6489 /* e.g. B or BL */
6490 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6491 inst.reloc.type =
6492 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6493 : BFD_RELOC_AARCH64_JUMP26;
6494 break;
6495 case loadlit:
6496 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6497 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6498 break;
6499 case pcreladdr:
6500 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6501 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6502 break;
6503 default:
6504 gas_assert (0);
6505 abort ();
6506 }
6507 inst.reloc.pc_rel = 1;
6508 }
6509 break;
6510
6511 case AARCH64_OPND_ADDR_SIMPLE:
6512 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6513 {
6514 /* [<Xn|SP>{, #<simm>}] */
6515 char *start = str;
6516 /* First use the normal address-parsing routines, to get
6517 the usual syntax errors. */
6518 po_misc_or_fail (parse_address (&str, info));
6519 if (info->addr.pcrel || info->addr.offset.is_reg
6520 || !info->addr.preind || info->addr.postind
6521 || info->addr.writeback)
6522 {
6523 set_syntax_error (_("invalid addressing mode"));
6524 goto failure;
6525 }
6526
6527 /* Then retry, matching the specific syntax of these addresses. */
6528 str = start;
6529 po_char_or_fail ('[');
6530 po_reg_or_fail (REG_TYPE_R64_SP);
6531 /* Accept optional ", #0". */
6532 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6533 && skip_past_char (&str, ','))
6534 {
6535 skip_past_char (&str, '#');
6536 if (! skip_past_char (&str, '0'))
6537 {
6538 set_fatal_syntax_error
6539 (_("the optional immediate offset can only be 0"));
6540 goto failure;
6541 }
6542 }
6543 po_char_or_fail (']');
6544 break;
6545 }
6546
6547 case AARCH64_OPND_ADDR_REGOFF:
6548 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6549 po_misc_or_fail (parse_address (&str, info));
6550 regoff_addr:
6551 if (info->addr.pcrel || !info->addr.offset.is_reg
6552 || !info->addr.preind || info->addr.postind
6553 || info->addr.writeback)
6554 {
6555 set_syntax_error (_("invalid addressing mode"));
6556 goto failure;
6557 }
6558 if (!info->shifter.operator_present)
6559 {
6560 /* Default to LSL if not present. Libopcodes prefers shifter
6561 kind to be explicit. */
6562 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6563 info->shifter.kind = AARCH64_MOD_LSL;
6564 }
6565 /* Qualifier to be deduced by libopcodes. */
6566 break;
6567
6568 case AARCH64_OPND_ADDR_SIMM7:
6569 po_misc_or_fail (parse_address (&str, info));
6570 if (info->addr.pcrel || info->addr.offset.is_reg
6571 || (!info->addr.preind && !info->addr.postind))
6572 {
6573 set_syntax_error (_("invalid addressing mode"));
6574 goto failure;
6575 }
6576 if (inst.reloc.type != BFD_RELOC_UNUSED)
6577 {
6578 set_syntax_error (_("relocation not allowed"));
6579 goto failure;
6580 }
6581 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6582 /* addr_off_p */ 1,
6583 /* need_libopcodes_p */ 1,
6584 /* skip_p */ 0);
6585 break;
6586
6587 case AARCH64_OPND_ADDR_SIMM9:
6588 case AARCH64_OPND_ADDR_SIMM9_2:
6589 case AARCH64_OPND_ADDR_SIMM11:
6590 case AARCH64_OPND_ADDR_SIMM13:
6591 po_misc_or_fail (parse_address (&str, info));
6592 if (info->addr.pcrel || info->addr.offset.is_reg
6593 || (!info->addr.preind && !info->addr.postind)
6594 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6595 && info->addr.writeback))
6596 {
6597 set_syntax_error (_("invalid addressing mode"));
6598 goto failure;
6599 }
6600 if (inst.reloc.type != BFD_RELOC_UNUSED)
6601 {
6602 set_syntax_error (_("relocation not allowed"));
6603 goto failure;
6604 }
6605 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6606 /* addr_off_p */ 1,
6607 /* need_libopcodes_p */ 1,
6608 /* skip_p */ 0);
6609 break;
6610
6611 case AARCH64_OPND_ADDR_SIMM10:
6612 case AARCH64_OPND_ADDR_OFFSET:
6613 po_misc_or_fail (parse_address (&str, info));
6614 if (info->addr.pcrel || info->addr.offset.is_reg
6615 || !info->addr.preind || info->addr.postind)
6616 {
6617 set_syntax_error (_("invalid addressing mode"));
6618 goto failure;
6619 }
6620 if (inst.reloc.type != BFD_RELOC_UNUSED)
6621 {
6622 set_syntax_error (_("relocation not allowed"));
6623 goto failure;
6624 }
6625 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6626 /* addr_off_p */ 1,
6627 /* need_libopcodes_p */ 1,
6628 /* skip_p */ 0);
6629 break;
6630
6631 case AARCH64_OPND_ADDR_UIMM12:
6632 po_misc_or_fail (parse_address (&str, info));
6633 if (info->addr.pcrel || info->addr.offset.is_reg
6634 || !info->addr.preind || info->addr.writeback)
6635 {
6636 set_syntax_error (_("invalid addressing mode"));
6637 goto failure;
6638 }
6639 if (inst.reloc.type == BFD_RELOC_UNUSED)
6640 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6641 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6642 || (inst.reloc.type
6643 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6644 || (inst.reloc.type
6645 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6646 || (inst.reloc.type
6647 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6648 || (inst.reloc.type
6649 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6650 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6651 /* Leave qualifier to be determined by libopcodes. */
6652 break;
6653
6654 case AARCH64_OPND_SIMD_ADDR_POST:
6655 /* [<Xn|SP>], <Xm|#<amount>> */
6656 po_misc_or_fail (parse_address (&str, info));
6657 if (!info->addr.postind || !info->addr.writeback)
6658 {
6659 set_syntax_error (_("invalid addressing mode"));
6660 goto failure;
6661 }
6662 if (!info->addr.offset.is_reg)
6663 {
6664 if (inst.reloc.exp.X_op == O_constant)
6665 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6666 else
6667 {
6668 set_fatal_syntax_error
6669 (_("writeback value must be an immediate constant"));
6670 goto failure;
6671 }
6672 }
6673 /* No qualifier. */
6674 break;
6675
6676 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6677 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
6678 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6679 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6680 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6681 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6682 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6683 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6684 case AARCH64_OPND_SVE_ADDR_RI_U6:
6685 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6686 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6687 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6688 /* [X<n>{, #imm, MUL VL}]
6689 [X<n>{, #imm}]
6690 but recognizing SVE registers. */
6691 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6692 &offset_qualifier));
6693 if (base_qualifier != AARCH64_OPND_QLF_X)
6694 {
6695 set_syntax_error (_("invalid addressing mode"));
6696 goto failure;
6697 }
6698 sve_regimm:
6699 if (info->addr.pcrel || info->addr.offset.is_reg
6700 || !info->addr.preind || info->addr.writeback)
6701 {
6702 set_syntax_error (_("invalid addressing mode"));
6703 goto failure;
6704 }
6705 if (inst.reloc.type != BFD_RELOC_UNUSED
6706 || inst.reloc.exp.X_op != O_constant)
6707 {
6708 /* Make sure this has priority over
6709 "invalid addressing mode". */
6710 set_fatal_syntax_error (_("constant offset required"));
6711 goto failure;
6712 }
6713 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6714 break;
6715
6716 case AARCH64_OPND_SVE_ADDR_R:
6717 /* [<Xn|SP>{, <R><m>}]
6718 but recognizing SVE registers. */
6719 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6720 &offset_qualifier));
6721 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6722 {
6723 offset_qualifier = AARCH64_OPND_QLF_X;
6724 info->addr.offset.is_reg = 1;
6725 info->addr.offset.regno = 31;
6726 }
6727 else if (base_qualifier != AARCH64_OPND_QLF_X
6728 || offset_qualifier != AARCH64_OPND_QLF_X)
6729 {
6730 set_syntax_error (_("invalid addressing mode"));
6731 goto failure;
6732 }
6733 goto regoff_addr;
6734
6735 case AARCH64_OPND_SVE_ADDR_RR:
6736 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6737 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6738 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6739 case AARCH64_OPND_SVE_ADDR_RX:
6740 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6741 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6742 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6743 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6744 but recognizing SVE registers. */
6745 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6746 &offset_qualifier));
6747 if (base_qualifier != AARCH64_OPND_QLF_X
6748 || offset_qualifier != AARCH64_OPND_QLF_X)
6749 {
6750 set_syntax_error (_("invalid addressing mode"));
6751 goto failure;
6752 }
6753 goto regoff_addr;
6754
6755 case AARCH64_OPND_SVE_ADDR_RZ:
6756 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6757 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6758 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6759 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6760 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6761 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6762 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6763 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6764 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6765 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6766 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6767 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6768 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6769 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6770 &offset_qualifier));
6771 if (base_qualifier != AARCH64_OPND_QLF_X
6772 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6773 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6774 {
6775 set_syntax_error (_("invalid addressing mode"));
6776 goto failure;
6777 }
6778 info->qualifier = offset_qualifier;
6779 goto regoff_addr;
6780
6781 case AARCH64_OPND_SVE_ADDR_ZX:
6782 /* [Zn.<T>{, <Xm>}]. */
6783 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6784 &offset_qualifier));
6785 /* Things to check:
6786 base_qualifier either S_S or S_D
6787 offset_qualifier must be X
6788 */
6789 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6790 && base_qualifier != AARCH64_OPND_QLF_S_D)
6791 || offset_qualifier != AARCH64_OPND_QLF_X)
6792 {
6793 set_syntax_error (_("invalid addressing mode"));
6794 goto failure;
6795 }
6796 info->qualifier = base_qualifier;
6797 if (!info->addr.offset.is_reg || info->addr.pcrel
6798 || !info->addr.preind || info->addr.writeback
6799 || info->shifter.operator_present != 0)
6800 {
6801 set_syntax_error (_("invalid addressing mode"));
6802 goto failure;
6803 }
6804 info->shifter.kind = AARCH64_MOD_LSL;
6805 break;
6806
6807
6808 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6809 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6810 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6811 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6812 /* [Z<n>.<T>{, #imm}] */
6813 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6814 &offset_qualifier));
6815 if (base_qualifier != AARCH64_OPND_QLF_S_S
6816 && base_qualifier != AARCH64_OPND_QLF_S_D)
6817 {
6818 set_syntax_error (_("invalid addressing mode"));
6819 goto failure;
6820 }
6821 info->qualifier = base_qualifier;
6822 goto sve_regimm;
6823
6824 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6825 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6826 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6827 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6828 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6829
6830 We don't reject:
6831
6832 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6833
6834 here since we get better error messages by leaving it to
6835 the qualifier checking routines. */
6836 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6837 &offset_qualifier));
6838 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6839 && base_qualifier != AARCH64_OPND_QLF_S_D)
6840 || offset_qualifier != base_qualifier)
6841 {
6842 set_syntax_error (_("invalid addressing mode"));
6843 goto failure;
6844 }
6845 info->qualifier = base_qualifier;
6846 goto regoff_addr;
6847
6848 case AARCH64_OPND_SYSREG:
6849 {
6850 uint32_t sysreg_flags;
6851 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6852 &sysreg_flags)) == PARSE_FAIL)
6853 {
6854 set_syntax_error (_("unknown or missing system register name"));
6855 goto failure;
6856 }
6857 inst.base.operands[i].sysreg.value = val;
6858 inst.base.operands[i].sysreg.flags = sysreg_flags;
6859 break;
6860 }
6861
6862 case AARCH64_OPND_PSTATEFIELD:
6863 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6864 == PARSE_FAIL)
6865 {
6866 set_syntax_error (_("unknown or missing PSTATE field name"));
6867 goto failure;
6868 }
6869 inst.base.operands[i].pstatefield = val;
6870 break;
6871
6872 case AARCH64_OPND_SYSREG_IC:
6873 inst.base.operands[i].sysins_op =
6874 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6875 goto sys_reg_ins;
6876
6877 case AARCH64_OPND_SYSREG_DC:
6878 inst.base.operands[i].sysins_op =
6879 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6880 goto sys_reg_ins;
6881
6882 case AARCH64_OPND_SYSREG_AT:
6883 inst.base.operands[i].sysins_op =
6884 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6885 goto sys_reg_ins;
6886
6887 case AARCH64_OPND_SYSREG_SR:
6888 inst.base.operands[i].sysins_op =
6889 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6890 goto sys_reg_ins;
6891
6892 case AARCH64_OPND_SYSREG_TLBI:
6893 inst.base.operands[i].sysins_op =
6894 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6895 sys_reg_ins:
6896 if (inst.base.operands[i].sysins_op == NULL)
6897 {
6898 set_fatal_syntax_error ( _("unknown or missing operation name"));
6899 goto failure;
6900 }
6901 break;
6902
6903 case AARCH64_OPND_BARRIER:
6904 case AARCH64_OPND_BARRIER_ISB:
6905 val = parse_barrier (&str);
6906 if (val != PARSE_FAIL
6907 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6908 {
6909 /* ISB only accepts options name 'sy'. */
6910 set_syntax_error
6911 (_("the specified option is not accepted in ISB"));
6912 /* Turn off backtrack as this optional operand is present. */
6913 backtrack_pos = 0;
6914 goto failure;
6915 }
6916 if (val != PARSE_FAIL
6917 && operands[i] == AARCH64_OPND_BARRIER)
6918 {
6919 /* Regular barriers accept options CRm (C0-C15).
6920 DSB nXS barrier variant accepts values > 15. */
6921 if (val < 0 || val > 15)
6922 {
6923 set_syntax_error (_("the specified option is not accepted in DSB"));
6924 goto failure;
6925 }
6926 }
6927 /* This is an extension to accept a 0..15 immediate. */
6928 if (val == PARSE_FAIL)
6929 po_imm_or_fail (0, 15);
6930 info->barrier = aarch64_barrier_options + val;
6931 break;
6932
6933 case AARCH64_OPND_BARRIER_DSB_NXS:
6934 val = parse_barrier (&str);
6935 if (val != PARSE_FAIL)
6936 {
6937 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
6938 if (!(val == 16 || val == 20 || val == 24 || val == 28))
6939 {
6940 set_syntax_error (_("the specified option is not accepted in DSB"));
6941 /* Turn off backtrack as this optional operand is present. */
6942 backtrack_pos = 0;
6943 goto failure;
6944 }
6945 }
6946 else
6947 {
6948 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
6949 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
6950 if (! parse_constant_immediate (&str, &val, imm_reg_type))
6951 goto failure;
6952 if (!(val == 16 || val == 20 || val == 24 || val == 28))
6953 {
6954 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
6955 goto failure;
6956 }
6957 }
6958 /* Option index is encoded as 2-bit value in val<3:2>. */
6959 val = (val >> 2) - 4;
6960 info->barrier = aarch64_barrier_dsb_nxs_options + val;
6961 break;
6962
6963 case AARCH64_OPND_PRFOP:
6964 val = parse_pldop (&str);
6965 /* This is an extension to accept a 0..31 immediate. */
6966 if (val == PARSE_FAIL)
6967 po_imm_or_fail (0, 31);
6968 inst.base.operands[i].prfop = aarch64_prfops + val;
6969 break;
6970
6971 case AARCH64_OPND_BARRIER_PSB:
6972 val = parse_barrier_psb (&str, &(info->hint_option));
6973 if (val == PARSE_FAIL)
6974 goto failure;
6975 break;
6976
6977 case AARCH64_OPND_BTI_TARGET:
6978 val = parse_bti_operand (&str, &(info->hint_option));
6979 if (val == PARSE_FAIL)
6980 goto failure;
6981 break;
6982
6983 case AARCH64_OPND_SME_ZAda_2b:
6984 case AARCH64_OPND_SME_ZAda_3b:
6985 val = parse_sme_zada_operand (&str, &qualifier);
6986 if (val == PARSE_FAIL)
6987 goto failure;
6988 info->reg.regno = val;
6989 info->qualifier = qualifier;
6990 break;
6991
6992 default:
6993 as_fatal (_("unhandled operand code %d"), operands[i]);
6994 }
6995
6996 /* If we get here, this operand was successfully parsed. */
6997 inst.base.operands[i].present = 1;
6998 continue;
6999
7000 failure:
7001 /* The parse routine should already have set the error, but in case
7002 not, set a default one here. */
7003 if (! error_p ())
7004 set_default_error ();
7005
7006 if (! backtrack_pos)
7007 goto parse_operands_return;
7008
7009 {
7010 /* We reach here because this operand is marked as optional, and
7011 either no operand was supplied or the operand was supplied but it
7012 was syntactically incorrect. In the latter case we report an
7013 error. In the former case we perform a few more checks before
7014 dropping through to the code to insert the default operand. */
7015
7016 char *tmp = backtrack_pos;
7017 char endchar = END_OF_INSN;
7018
7019 if (i != (aarch64_num_of_operands (opcode) - 1))
7020 endchar = ',';
7021 skip_past_char (&tmp, ',');
7022
7023 if (*tmp != endchar)
7024 /* The user has supplied an operand in the wrong format. */
7025 goto parse_operands_return;
7026
7027 /* Make sure there is not a comma before the optional operand.
7028 For example the fifth operand of 'sys' is optional:
7029
7030 sys #0,c0,c0,#0, <--- wrong
7031 sys #0,c0,c0,#0 <--- correct. */
7032 if (comma_skipped_p && i && endchar == END_OF_INSN)
7033 {
7034 set_fatal_syntax_error
7035 (_("unexpected comma before the omitted optional operand"));
7036 goto parse_operands_return;
7037 }
7038 }
7039
7040 /* Reaching here means we are dealing with an optional operand that is
7041 omitted from the assembly line. */
7042 gas_assert (optional_operand_p (opcode, i));
7043 info->present = 0;
7044 process_omitted_operand (operands[i], opcode, i, info);
7045
7046 /* Try again, skipping the optional operand at backtrack_pos. */
7047 str = backtrack_pos;
7048 backtrack_pos = 0;
7049
7050 /* Clear any error record after the omitted optional operand has been
7051 successfully handled. */
7052 clear_error ();
7053 }
7054
7055 /* Check if we have parsed all the operands. */
7056 if (*str != '\0' && ! error_p ())
7057 {
7058 /* Set I to the index of the last present operand; this is
7059 for the purpose of diagnostics. */
7060 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7061 ;
7062 set_fatal_syntax_error
7063 (_("unexpected characters following instruction"));
7064 }
7065
7066 parse_operands_return:
7067
7068 if (error_p ())
7069 {
7070 DEBUG_TRACE ("parsing FAIL: %s - %s",
7071 operand_mismatch_kind_names[get_error_kind ()],
7072 get_error_message ());
7073 /* Record the operand error properly; this is useful when there
7074 are multiple instruction templates for a mnemonic name, so that
7075 later on, we can select the error that most closely describes
7076 the problem. */
7077 record_operand_error (opcode, i, get_error_kind (),
7078 get_error_message ());
7079 return false;
7080 }
7081 else
7082 {
7083 DEBUG_TRACE ("parsing SUCCESS");
7084 return true;
7085 }
7086 }
7087
7088 /* It does some fix-up to provide some programmer friendly feature while
7089 keeping the libopcodes happy, i.e. libopcodes only accepts
7090 the preferred architectural syntax.
7091 Return FALSE if there is any failure; otherwise return TRUE. */
7092
7093 static bool
7094 programmer_friendly_fixup (aarch64_instruction *instr)
7095 {
7096 aarch64_inst *base = &instr->base;
7097 const aarch64_opcode *opcode = base->opcode;
7098 enum aarch64_op op = opcode->op;
7099 aarch64_opnd_info *operands = base->operands;
7100
7101 DEBUG_TRACE ("enter");
7102
7103 switch (opcode->iclass)
7104 {
7105 case testbranch:
7106 /* TBNZ Xn|Wn, #uimm6, label
7107 Test and Branch Not Zero: conditionally jumps to label if bit number
7108 uimm6 in register Xn is not zero. The bit number implies the width of
7109 the register, which may be written and should be disassembled as Wn if
7110 uimm is less than 32. */
7111 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
7112 {
7113 if (operands[1].imm.value >= 32)
7114 {
7115 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
7116 0, 31);
7117 return false;
7118 }
7119 operands[0].qualifier = AARCH64_OPND_QLF_X;
7120 }
7121 break;
7122 case loadlit:
7123 /* LDR Wt, label | =value
7124 As a convenience assemblers will typically permit the notation
7125 "=value" in conjunction with the pc-relative literal load instructions
7126 to automatically place an immediate value or symbolic address in a
7127 nearby literal pool and generate a hidden label which references it.
7128 ISREG has been set to 0 in the case of =value. */
7129 if (instr->gen_lit_pool
7130 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
7131 {
7132 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
7133 if (op == OP_LDRSW_LIT)
7134 size = 4;
7135 if (instr->reloc.exp.X_op != O_constant
7136 && instr->reloc.exp.X_op != O_big
7137 && instr->reloc.exp.X_op != O_symbol)
7138 {
7139 record_operand_error (opcode, 1,
7140 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
7141 _("constant expression expected"));
7142 return false;
7143 }
7144 if (! add_to_lit_pool (&instr->reloc.exp, size))
7145 {
7146 record_operand_error (opcode, 1,
7147 AARCH64_OPDE_OTHER_ERROR,
7148 _("literal pool insertion failed"));
7149 return false;
7150 }
7151 }
7152 break;
7153 case log_shift:
7154 case bitfield:
7155 /* UXT[BHW] Wd, Wn
7156 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
7157 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
7158 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
7159 A programmer-friendly assembler should accept a destination Xd in
7160 place of Wd, however that is not the preferred form for disassembly.
7161 */
7162 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
7163 && operands[1].qualifier == AARCH64_OPND_QLF_W
7164 && operands[0].qualifier == AARCH64_OPND_QLF_X)
7165 operands[0].qualifier = AARCH64_OPND_QLF_W;
7166 break;
7167
7168 case addsub_ext:
7169 {
7170 /* In the 64-bit form, the final register operand is written as Wm
7171 for all but the (possibly omitted) UXTX/LSL and SXTX
7172 operators.
7173 As a programmer-friendly assembler, we accept e.g.
7174 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
7175 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
7176 int idx = aarch64_operand_index (opcode->operands,
7177 AARCH64_OPND_Rm_EXT);
7178 gas_assert (idx == 1 || idx == 2);
7179 if (operands[0].qualifier == AARCH64_OPND_QLF_X
7180 && operands[idx].qualifier == AARCH64_OPND_QLF_X
7181 && operands[idx].shifter.kind != AARCH64_MOD_LSL
7182 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
7183 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
7184 operands[idx].qualifier = AARCH64_OPND_QLF_W;
7185 }
7186 break;
7187
7188 default:
7189 break;
7190 }
7191
7192 DEBUG_TRACE ("exit with SUCCESS");
7193 return true;
7194 }
7195
/* Check for loads and stores that will cause unpredictable behavior.
   INSTR is the fully parsed and encoded instruction; STR is the
   original source line, used only in the warning text.  This routine
   only warns via as_warn -- it never rejects the instruction.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.
	 Operand 0 is the transfer register, operand 1 the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 Operands 0 and 1 are the transfer pair, operand 2 the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 NOTE(review): bit 22 of the encoding is taken to be the load/store
	 (L) selector here and below -- confirm against the opcode table.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	   == AARCH64_OPND_CLASS_INT_REG))
	{
	  /* Bit 22 set: load-exclusive forms.  */
          if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.
		 Bit 21 is taken to select the pair form.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /*  Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7291
7292 static void
7293 force_automatic_sequence_close (void)
7294 {
7295 if (now_instr_sequence.instr)
7296 {
7297 as_warn (_("previous `%s' sequence has not been closed"),
7298 now_instr_sequence.instr->opcode->name);
7299 init_insn_sequence (NULL, &now_instr_sequence);
7300 }
7301 }
7302
7303 /* A wrapper function to interface with libopcodes on encoding and
7304 record the error message if there is any.
7305
7306 Return TRUE on success; otherwise return FALSE. */
7307
7308 static bool
7309 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7310 aarch64_insn *code)
7311 {
7312 aarch64_operand_error error_info;
7313 memset (&error_info, '\0', sizeof (error_info));
7314 error_info.kind = AARCH64_OPDE_NIL;
7315 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7316 && !error_info.non_fatal)
7317 return true;
7318
7319 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7320 record_operand_error_info (opcode, &error_info);
7321 return error_info.non_fatal;
7322 }
7323
#ifdef DEBUG_AARCH64
/* Debug helper: print each operand of OPCODE, using the operand's name
   when it has one and its description otherwise.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  for (int i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *opnd_name = aarch64_get_operand_name (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i,
		       opnd_name[0] != '\0'
		       ? opnd_name
		       : aarch64_get_operand_desc (opcode->operands[i]));
    }
}
#endif /* DEBUG_AARCH64 */
7339
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  Errors are reported through the
   operand-error machinery so that the diagnostic for the closest-matching
   opcode template is the one finally shown.  */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  const aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Look up the mnemonic; on success P is advanced past it so that it
     points at the operand list (presumably -- opcode_lookup is defined
     elsewhere in this file).  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Parsing may clobber inst; preserve the condition decoded from the
     mnemonic (e.g. b.eq) across the reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.
     The first template that parses and encodes successfully wins.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, canonicalize and encode; all three must succeed before
	 anything is emitted.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; reset the instruction state before trying
	 the next candidate with the same mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
7479
/* Various frobbings of labels and their addresses.  */

/* Per-line hook: forget any label recorded on a previous line, so that
   md_assemble only (re-)attaches labels that appear on the current
   line.  */

void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
7487
/* Hook called when label SYM is defined: remember it so md_assemble can
   align/attach it to the next instruction, and emit DWARF line-table
   information for it.  */

void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
7495
/* Section-change hook: close (with a warning) any instruction sequence
   that is still open when the section ends.  SEC itself is unused.  */

void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
7502
7503 int
7504 aarch64_data_in_code (void)
7505 {
7506 if (startswith (input_line_pointer + 1, "data:"))
7507 {
7508 *input_line_pointer = '/';
7509 input_line_pointer += 5;
7510 *input_line_pointer = 0;
7511 return 1;
7512 }
7513
7514 return 0;
7515 }
7516
/* Canonicalize NAME by stripping a trailing "/data" suffix in place
   (the counterpart of the marker produced by aarch64_data_in_code).
   Returns NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
7527 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF and REGDEF_ALIAS differ only in the final flag field
   (presumably marking whether the name is the canonical spelling --
   confirm against the reg_entry definition).  REGNUM pastes a prefix
   and register number together; REGSET16/REGSET31/REGSET expand to 16,
   31 and 32 consecutively numbered entries respectively.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  Only x0-x30/w0-w30 are defined here; number 31
     is covered by the distinctly-typed sp/wsp/xzr/wzr entries below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  /* Conventional alternate spellings for x16/x17/x29/x30.  */
  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  /* Zero registers share number 31 with sp/wsp but carry their own
     register types.  */
  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  */
  REGSET16 (p, PN), REGSET16 (P, PN),

  /* SME ZA tile registers.  */
  REGSET16 (za, ZA), REGSET16 (ZA, ZA)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7600
/* Single-letter helpers for the table below: an uppercase letter means
   the corresponding condition flag bit is set, lowercase means clear.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the four flag bits into a 4-bit value: N in bit 3, Z in bit 2,
   C in bit 1, V in bit 0.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All sixteen case-spellings of the NZCV operand, each mapped to the
   4-bit immediate its capitalization encodes.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7638 \f
7639 /* MD interface: bits in the object file. */
7640
7641 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7642 for use in the a.out file, and stores them in the array pointed to by buf.
7643 This knows about the endian-ness of the target machine and does
7644 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7645 2 (short) and 4 (long) Floating numbers are put out as a series of
7646 LITTLENUMS (shorts, here at least). */
7647
7648 void
7649 md_number_to_chars (char *buf, valueT val, int n)
7650 {
7651 if (target_big_endian)
7652 number_to_chars_bigendian (buf, val, n);
7653 else
7654 number_to_chars_littleendian (buf, val, n);
7655 }
7656
7657 /* MD interface: Sections. */
7658
7659 /* Estimate the size of a frag before relaxing. Assume everything fits in
7660 4 bytes. */
7661
7662 int
7663 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
7664 {
7665 fragp->fr_var = 4;
7666 return 4;
7667 }
7668
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No target-specific rounding is required; return SIZE unchanged.  */
  return size;
}
7676
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  /* Only code-alignment frags are handled here.  */
  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed to reach the next frag's address.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach a 4-byte instruction boundary; these cannot
     hold an instruction, so they are zero-filled and accounted to the
     fixed part of the frag.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating part of the frag is a single NOP pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
7734
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets: mapping symbols are an ELF
     concept.  */
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      /* Filled frags hold data, not instructions.  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}

/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* On function entry the CFA is the stack pointer with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
#endif /* OBJ_ELF */
7789
7790 /* Convert REGNAME to a DWARF-2 register number. */
7791
7792 int
7793 tc_aarch64_regname_to_dw2regnum (char *regname)
7794 {
7795 const reg_entry *reg = parse_reg (&regname);
7796 if (reg == NULL)
7797 return -1;
7798
7799 switch (reg->type)
7800 {
7801 case REG_TYPE_SP_32:
7802 case REG_TYPE_SP_64:
7803 case REG_TYPE_R_32:
7804 case REG_TYPE_R_64:
7805 return reg->number;
7806
7807 case REG_TYPE_FP_B:
7808 case REG_TYPE_FP_H:
7809 case REG_TYPE_FP_S:
7810 case REG_TYPE_FP_D:
7811 case REG_TYPE_FP_Q:
7812 return reg->number + 64;
7813
7814 default:
7815 break;
7816 }
7817 return -1;
7818 }
7819
/* Implement DWARF2_ADDR_SIZE.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 4-byte addresses regardless of the BFD default.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7831
7832 /* MD interface: Symbol and relocation handling. */
7833
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For AArch64 PC-relative fixups applied to instructions
   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
7854
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap first-two-character check before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  /* A user-defined symbol of this name would conflict with the
	     synthesised GOT symbol.  */
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
7880
7881 /* Return non-zero if the indicated VALUE has overflowed the maximum
7882 range expressible by a unsigned number with the indicated number of
7883 BITS. */
7884
7885 static bool
7886 unsigned_overflow (valueT value, unsigned bits)
7887 {
7888 valueT lim;
7889 if (bits >= sizeof (valueT) * 8)
7890 return false;
7891 lim = (valueT) 1 << bits;
7892 return (value >= lim);
7893 }
7894
7895
7896 /* Return non-zero if the indicated VALUE has overflowed the maximum
7897 range expressible by an signed number with the indicated number of
7898 BITS. */
7899
7900 static bool
7901 signed_overflow (offsetT value, unsigned bits)
7902 {
7903 offsetT lim;
7904 if (bits >= sizeof (offsetT) * 8)
7905 return false;
7906 lim = (offsetT) 1 << (bits - 1);
7907 return (value < -lim || value >= lim);
7908 }
7909
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; any
     opcode without a counterpart maps to OP_NIL and is rejected.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure means the offset
     does not fit the 9-bit signed form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
7972
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN is the wide-negated form;
	 this matches the MOVZ -> MOVN -> ORR preference described
	 above).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
8033
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov     w0, u32
     .set    u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate; patched directly into the already-emitted
	 instruction word.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
	 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
	 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
	 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12 Rn Rd ADD
	 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
	 51000000 sf 101|0001|shift imm12 Rn Rd SUB
	 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode via the opcode table rather than
	 patching bits, since the logical-immediate encoding is
	 non-trivial.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      /* Fall back to the unscaled LDUR/STUR form for negative or
	 unaligned offsets on scaled load/stores.  */
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8212
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    /* Plain data relocations: write the resolved value directly when it
       is known here, or when the target does not use RELA (so the addend
       lives in the section contents).  */
    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* 19-bit word offset: must be word aligned and fit in a
	     signed 21-bit byte offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVW family: SCALE selects which 16-bit group of the value is
       inserted into the instruction's immediate field.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Resolve the generic reloc into the LP64 or ILP32 variant.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* TLS relocations: mark the symbol thread-local and leave the
       relocation for the linker.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free any heap-allocated aarch64_inst attached to the fix-up.
     N.B. currently only a very limited number of fix-up types actually
     use this field, so the impact on performance should be minimal.  */
  free (fixP->tc_fix_data.inst);

  return;
}
8599
8600 /* Translate internal representation of relocation info to BFD target
8601 format. */
8602
8603 arelent *
8604 tc_gen_reloc (asection * section, fixS * fixp)
8605 {
8606 arelent *reloc;
8607 bfd_reloc_code_real_type code;
8608
8609 reloc = XNEW (arelent);
8610
8611 reloc->sym_ptr_ptr = XNEW (asymbol *);
8612 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8613 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8614
8615 if (fixp->fx_pcrel)
8616 {
8617 if (section->use_rela_p)
8618 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8619 else
8620 fixp->fx_offset = reloc->address;
8621 }
8622 reloc->addend = fixp->fx_offset;
8623
8624 code = fixp->fx_r_type;
8625 switch (code)
8626 {
8627 case BFD_RELOC_16:
8628 if (fixp->fx_pcrel)
8629 code = BFD_RELOC_16_PCREL;
8630 break;
8631
8632 case BFD_RELOC_32:
8633 if (fixp->fx_pcrel)
8634 code = BFD_RELOC_32_PCREL;
8635 break;
8636
8637 case BFD_RELOC_64:
8638 if (fixp->fx_pcrel)
8639 code = BFD_RELOC_64_PCREL;
8640 break;
8641
8642 default:
8643 break;
8644 }
8645
8646 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8647 if (reloc->howto == NULL)
8648 {
8649 as_bad_where (fixp->fx_file, fixp->fx_line,
8650 _
8651 ("cannot represent %s relocation in this object file format"),
8652 bfd_get_reloc_code_name (code));
8653 return NULL;
8654 }
8655
8656 return reloc;
8657 }
8658
8659 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8660
8661 void
8662 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8663 {
8664 bfd_reloc_code_real_type type;
8665 int pcrel = 0;
8666
8667 /* Pick a reloc.
8668 FIXME: @@ Should look at CPU word size. */
8669 switch (size)
8670 {
8671 case 1:
8672 type = BFD_RELOC_8;
8673 break;
8674 case 2:
8675 type = BFD_RELOC_16;
8676 break;
8677 case 4:
8678 type = BFD_RELOC_32;
8679 break;
8680 case 8:
8681 type = BFD_RELOC_64;
8682 break;
8683 default:
8684 as_bad (_("cannot do %u-byte relocation"), size);
8685 type = BFD_RELOC_UNUSED;
8686 break;
8687 }
8688
8689 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8690 }
8691
8692 #ifdef OBJ_ELF
8693
8694 /* Implement md_after_parse_args. This is the earliest time we need to decide
8695 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8696
8697 void
8698 aarch64_after_parse_args (void)
8699 {
8700 if (aarch64_abi != AARCH64_ABI_NONE)
8701 return;
8702
8703 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8704 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8705 aarch64_abi = AARCH64_ABI_ILP32;
8706 else
8707 aarch64_abi = AARCH64_ABI_LP64;
8708 }
8709
8710 const char *
8711 elf64_aarch64_target_format (void)
8712 {
8713 #ifdef TE_CLOUDABI
8714 /* FIXME: What to do for ilp32_p ? */
8715 if (target_big_endian)
8716 return "elf64-bigaarch64-cloudabi";
8717 else
8718 return "elf64-littleaarch64-cloudabi";
8719 #else
8720 if (target_big_endian)
8721 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8722 else
8723 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8724 #endif
8725 }
8726
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  /* No AArch64-specific symbol tweaks; defer to the generic ELF hook.  */
  elf_frob_symbol (symp, puntp);
}
8732 #endif
8733
8734 /* MD interface: Finalization. */
8735
8736 /* A good place to do this, although this was probably not intended
8737 for this kind of use. We need to dump the literal pool before
8738 references are made to a null symbol pointer. */
8739
8740 void
8741 aarch64_cleanup (void)
8742 {
8743 literal_pool *pool;
8744
8745 for (pool = list_of_pools; pool; pool = pool->next)
8746 {
8747 /* Put it at the end of the relevant section. */
8748 subseg_set (pool->section, pool->sub_section);
8749 s_ltorg (0);
8750 }
8751 }
8752
8753 #ifdef OBJ_ELF
8754 /* Remove any excess mapping symbols generated for alignment frags in
8755 SEC. We may have created a mapping symbol before a zero byte
8756 alignment; remove it if there's a mapping symbol after the
8757 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections gas did not emit frags into.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      /* Only consider mapping symbols placed exactly at the end of
	 their frag, i.e. at the start of NEXT.  */
      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward over empty frags to decide whether SYM is
	 redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8817 #endif
8818
8819 /* Adjust the symbol table. */
8820
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Mapping symbols only exist in ELF output.  */
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8831
/* Insert KEY -> VALUE into TABLE; the trailing 0 is str_hash_insert's
   REPLACE argument, so an existing binding for KEY is kept.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
8837
/* Insert a system-register name into TABLE, asserting first that the
   name fits in the fixed-size buffer used when such names are parsed.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
8844
8845 static void
8846 fill_instruction_hash_table (void)
8847 {
8848 const aarch64_opcode *opcode = aarch64_opcode_table;
8849
8850 while (opcode->name != NULL)
8851 {
8852 templates *templ, *new_templ;
8853 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
8854
8855 new_templ = XNEW (templates);
8856 new_templ->opcode = opcode;
8857 new_templ->next = NULL;
8858
8859 if (!templ)
8860 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8861 else
8862 {
8863 new_templ->next = templ->next;
8864 templ->next = new_templ;
8865 }
8866 ++opcode;
8867 }
8868 }
8869
/* Copy at most NUM characters of the NUL-terminated string SRC into DST,
   converting each to upper case, and NUL-terminate DST.  DST must have
   room for NUM + 1 characters.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t i;

  /* Use size_t for the counter to match NUM's type; the previous
     unsigned int counter could not cover the full size_t range.  */
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
8878
8879 /* Assume STR point to a lower-case string, allocate, convert and return
8880 the corresponding upper-case string. */
8881 static inline const char*
8882 get_upper_str (const char *str)
8883 {
8884 char *ret;
8885 size_t len = strlen (str);
8886 ret = XNEWVEC (char, len + 1);
8887 convert_to_upper (ret, str, len);
8888 return ret;
8889 }
8890
8891 /* MD interface: Initialization. */
8892
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create every lookup table up front, then populate them below.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and PSTATE fields go through sysreg_hash_insert,
     which checks the name-length limit.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  /* The DSB nXS variants share the barrier-option table.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options:
     -mcpu wins over -march; otherwise fall back to the built-in default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
9051
9052 /* Command line processing. */
9053
/* Target short options: "-m<arg>" is consumed here and dispatched in
   md_parse_option.  */
const char *md_shortopts = "m:";

/* Only define the -EB/-EL option(s) the target can actually honour;
   a bi-endian build gets both.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9078
/* A simple boolean command-line option: matching it stores VALUE into
   *VAR.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9101
/* A CPU selectable with -mcpu=: its base architecture plus implemented
   extensions, and the canonical spelling for diagnostics.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9270
/* An architecture selectable with -march=.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {NULL, AARCH64_ARCH_NONE}
};
9293
/* ISA extensions.  An extension named in "+ext"/"+noext" suffixes:
   VALUE is the feature bit(s) it controls and REQUIRE the features it
   depends on (used for the transitive enable/disable closures).  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME
					 | AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME
					 | AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_ARCH_NONE},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_ARCH_NONE},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
9399
/* A long option taking an argument: OPTION is the "-m...=" prefix to
   match and FUNC parses the text following it.  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
9407
9408 /* Transitive closure of features depending on set. */
9409 static aarch64_feature_set
9410 aarch64_feature_disable_set (aarch64_feature_set set)
9411 {
9412 const struct aarch64_option_cpu_value_table *opt;
9413 aarch64_feature_set prev = 0;
9414
9415 while (prev != set) {
9416 prev = set;
9417 for (opt = aarch64_features; opt->name != NULL; opt++)
9418 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9419 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9420 }
9421 return set;
9422 }
9423
9424 /* Transitive closure of dependencies of set. */
9425 static aarch64_feature_set
9426 aarch64_feature_enable_set (aarch64_feature_set set)
9427 {
9428 const struct aarch64_option_cpu_value_table *opt;
9429 aarch64_feature_set prev = 0;
9430
9431 while (prev != set) {
9432 prev = set;
9433 for (opt = aarch64_features; opt->name != NULL; opt++)
9434 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9435 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9436 }
9437 return set;
9438 }
9439
9440 static int
9441 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9442 bool ext_only)
9443 {
9444 /* We insist on extensions being added before being removed. We achieve
9445 this by using the ADDING_VALUE variable to indicate whether we are
9446 adding an extension (1) or removing it (0) and only allowing it to
9447 change in the order -1 -> 1 -> 0. */
9448 int adding_value = -1;
9449 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9450
9451 /* Copy the feature set, so that we can modify it. */
9452 *ext_set = **opt_p;
9453 *opt_p = ext_set;
9454
9455 while (str != NULL && *str != 0)
9456 {
9457 const struct aarch64_option_cpu_value_table *opt;
9458 const char *ext = NULL;
9459 int optlen;
9460
9461 if (!ext_only)
9462 {
9463 if (*str != '+')
9464 {
9465 as_bad (_("invalid architectural extension"));
9466 return 0;
9467 }
9468
9469 ext = strchr (++str, '+');
9470 }
9471
9472 if (ext != NULL)
9473 optlen = ext - str;
9474 else
9475 optlen = strlen (str);
9476
9477 if (optlen >= 2 && startswith (str, "no"))
9478 {
9479 if (adding_value != 0)
9480 adding_value = 0;
9481 optlen -= 2;
9482 str += 2;
9483 }
9484 else if (optlen > 0)
9485 {
9486 if (adding_value == -1)
9487 adding_value = 1;
9488 else if (adding_value != 1)
9489 {
9490 as_bad (_("must specify extensions to add before specifying "
9491 "those to remove"));
9492 return false;
9493 }
9494 }
9495
9496 if (optlen == 0)
9497 {
9498 as_bad (_("missing architectural extension"));
9499 return 0;
9500 }
9501
9502 gas_assert (adding_value != -1);
9503
9504 for (opt = aarch64_features; opt->name != NULL; opt++)
9505 if (strncmp (opt->name, str, optlen) == 0)
9506 {
9507 aarch64_feature_set set;
9508
9509 /* Add or remove the extension. */
9510 if (adding_value)
9511 {
9512 set = aarch64_feature_enable_set (opt->value);
9513 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9514 }
9515 else
9516 {
9517 set = aarch64_feature_disable_set (opt->value);
9518 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9519 }
9520 break;
9521 }
9522
9523 if (opt->name == NULL)
9524 {
9525 as_bad (_("unknown architectural extension `%s'"), str);
9526 return 0;
9527 }
9528
9529 str = ext;
9530 };
9531
9532 return 1;
9533 }
9534
9535 static int
9536 aarch64_parse_cpu (const char *str)
9537 {
9538 const struct aarch64_cpu_option_table *opt;
9539 const char *ext = strchr (str, '+');
9540 size_t optlen;
9541
9542 if (ext != NULL)
9543 optlen = ext - str;
9544 else
9545 optlen = strlen (str);
9546
9547 if (optlen == 0)
9548 {
9549 as_bad (_("missing cpu name `%s'"), str);
9550 return 0;
9551 }
9552
9553 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9554 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9555 {
9556 mcpu_cpu_opt = &opt->value;
9557 if (ext != NULL)
9558 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
9559
9560 return 1;
9561 }
9562
9563 as_bad (_("unknown cpu `%s'"), str);
9564 return 0;
9565 }
9566
9567 static int
9568 aarch64_parse_arch (const char *str)
9569 {
9570 const struct aarch64_arch_option_table *opt;
9571 const char *ext = strchr (str, '+');
9572 size_t optlen;
9573
9574 if (ext != NULL)
9575 optlen = ext - str;
9576 else
9577 optlen = strlen (str);
9578
9579 if (optlen == 0)
9580 {
9581 as_bad (_("missing architecture name `%s'"), str);
9582 return 0;
9583 }
9584
9585 for (opt = aarch64_archs; opt->name != NULL; opt++)
9586 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9587 {
9588 march_cpu_opt = &opt->value;
9589 if (ext != NULL)
9590 return aarch64_parse_features (ext, &march_cpu_opt, false);
9591
9592 return 1;
9593 }
9594
9595 as_bad (_("unknown architecture `%s'\n"), str);
9596 return 0;
9597 }
9598
/* ABIs.  Data-model names accepted by -mabi=.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
9610
9611 static int
9612 aarch64_parse_abi (const char *str)
9613 {
9614 unsigned int i;
9615
9616 if (str[0] == '\0')
9617 {
9618 as_bad (_("missing abi name `%s'"), str);
9619 return 0;
9620 }
9621
9622 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9623 if (strcmp (str, aarch64_abis[i].name) == 0)
9624 {
9625 aarch64_abi = aarch64_abis[i].value;
9626 return 1;
9627 }
9628
9629 as_bad (_("unknown abi `%s'\n"), str);
9630 return 0;
9631 }
9632
/* Long-form target options that take an argument (e.g. -mcpu=cortex-a57).
   The trailing '=' in each option name marks where the argument starts;
   md_parse_option hands everything after it to the matching parser.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
9644
9645 int
9646 md_parse_option (int c, const char *arg)
9647 {
9648 struct aarch64_option_table *opt;
9649 struct aarch64_long_option_table *lopt;
9650
9651 switch (c)
9652 {
9653 #ifdef OPTION_EB
9654 case OPTION_EB:
9655 target_big_endian = 1;
9656 break;
9657 #endif
9658
9659 #ifdef OPTION_EL
9660 case OPTION_EL:
9661 target_big_endian = 0;
9662 break;
9663 #endif
9664
9665 case 'a':
9666 /* Listing option. Just ignore these, we don't support additional
9667 ones. */
9668 return 0;
9669
9670 default:
9671 for (opt = aarch64_opts; opt->option != NULL; opt++)
9672 {
9673 if (c == opt->option[0]
9674 && ((arg == NULL && opt->option[1] == 0)
9675 || streq (arg, opt->option + 1)))
9676 {
9677 /* If the option is deprecated, tell the user. */
9678 if (opt->deprecated != NULL)
9679 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
9680 arg ? arg : "", _(opt->deprecated));
9681
9682 if (opt->var != NULL)
9683 *opt->var = opt->value;
9684
9685 return 1;
9686 }
9687 }
9688
9689 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9690 {
9691 /* These options are expected to have an argument. */
9692 if (c == lopt->option[0]
9693 && arg != NULL
9694 && startswith (arg, lopt->option + 1))
9695 {
9696 /* If the option is deprecated, tell the user. */
9697 if (lopt->deprecated != NULL)
9698 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
9699 _(lopt->deprecated));
9700
9701 /* Call the sup-option parser. */
9702 return lopt->func (arg + strlen (lopt->option) - 1);
9703 }
9704 }
9705
9706 return 0;
9707 }
9708
9709 return 1;
9710 }
9711
9712 void
9713 md_show_usage (FILE * fp)
9714 {
9715 struct aarch64_option_table *opt;
9716 struct aarch64_long_option_table *lopt;
9717
9718 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9719
9720 for (opt = aarch64_opts; opt->option != NULL; opt++)
9721 if (opt->help != NULL)
9722 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9723
9724 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9725 if (lopt->help != NULL)
9726 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
9727
9728 #ifdef OPTION_EB
9729 fprintf (fp, _("\
9730 -EB assemble code for a big-endian cpu\n"));
9731 #endif
9732
9733 #ifdef OPTION_EL
9734 fprintf (fp, _("\
9735 -EL assemble code for a little-endian cpu\n"));
9736 #endif
9737 }
9738
9739 /* Parse a .cpu directive. */
9740
9741 static void
9742 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9743 {
9744 const struct aarch64_cpu_option_table *opt;
9745 char saved_char;
9746 char *name;
9747 char *ext;
9748 size_t optlen;
9749
9750 name = input_line_pointer;
9751 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9752 input_line_pointer++;
9753 saved_char = *input_line_pointer;
9754 *input_line_pointer = 0;
9755
9756 ext = strchr (name, '+');
9757
9758 if (ext != NULL)
9759 optlen = ext - name;
9760 else
9761 optlen = strlen (name);
9762
9763 /* Skip the first "all" entry. */
9764 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9765 if (strlen (opt->name) == optlen
9766 && strncmp (name, opt->name, optlen) == 0)
9767 {
9768 mcpu_cpu_opt = &opt->value;
9769 if (ext != NULL)
9770 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
9771 return;
9772
9773 cpu_variant = *mcpu_cpu_opt;
9774
9775 *input_line_pointer = saved_char;
9776 demand_empty_rest_of_line ();
9777 return;
9778 }
9779 as_bad (_("unknown cpu `%s'"), name);
9780 *input_line_pointer = saved_char;
9781 ignore_rest_of_line ();
9782 }
9783
9784
9785 /* Parse a .arch directive. */
9786
9787 static void
9788 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9789 {
9790 const struct aarch64_arch_option_table *opt;
9791 char saved_char;
9792 char *name;
9793 char *ext;
9794 size_t optlen;
9795
9796 name = input_line_pointer;
9797 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9798 input_line_pointer++;
9799 saved_char = *input_line_pointer;
9800 *input_line_pointer = 0;
9801
9802 ext = strchr (name, '+');
9803
9804 if (ext != NULL)
9805 optlen = ext - name;
9806 else
9807 optlen = strlen (name);
9808
9809 /* Skip the first "all" entry. */
9810 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9811 if (strlen (opt->name) == optlen
9812 && strncmp (name, opt->name, optlen) == 0)
9813 {
9814 mcpu_cpu_opt = &opt->value;
9815 if (ext != NULL)
9816 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
9817 return;
9818
9819 cpu_variant = *mcpu_cpu_opt;
9820
9821 *input_line_pointer = saved_char;
9822 demand_empty_rest_of_line ();
9823 return;
9824 }
9825
9826 as_bad (_("unknown architecture `%s'\n"), name);
9827 *input_line_pointer = saved_char;
9828 ignore_rest_of_line ();
9829 }
9830
9831 /* Parse a .arch_extension directive. */
9832
9833 static void
9834 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9835 {
9836 char saved_char;
9837 char *ext = input_line_pointer;;
9838
9839 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9840 input_line_pointer++;
9841 saved_char = *input_line_pointer;
9842 *input_line_pointer = 0;
9843
9844 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
9845 return;
9846
9847 cpu_variant = *mcpu_cpu_opt;
9848
9849 *input_line_pointer = saved_char;
9850 demand_empty_rest_of_line ();
9851 }
9852
/* Copy symbol information.  */

/* Target hook used when one symbol is set from another (e.g. via .set):
   propagate the AArch64-specific symbol flag from SRC to DEST.  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
9860
#ifdef OBJ_ELF
/* Same as elf_copy_symbol_attributes, but without copying st_other.
   This is needed so AArch64 specific st_other values can be independently
   specified for an IFUNC resolver (that is called by the dynamic linker)
   and the symbol it resolves (aliased to the resolver).  In particular,
   if a function symbol has special st_other value set via directives,
   then attaching an IFUNC resolver to that symbol should not override
   the st_other setting.  Requiring the directive on the IFUNC resolver
   symbol would be unexpected and problematic in C code, where the two
   symbols appear as two independent function declarations.  */

void
aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  struct elf_obj_sy *src_obj = symbol_get_obj (src);
  struct elf_obj_sy *dest_obj = symbol_get_obj (dest);

  if (src_obj->size == NULL)
    {
      /* SRC carries no size expression: drop any stale one on DEST.  */
      free (dest_obj->size);
      dest_obj->size = NULL;
    }
  else
    {
      /* Copy SRC's size expression, allocating DEST's on first use.  */
      if (dest_obj->size == NULL)
	dest_obj->size = XNEW (expressionS);
      *dest_obj->size = *src_obj->size;
    }

  S_SET_SIZE (dest, S_GET_SIZE (src));
}
#endif