]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
gas: Implement categorization of Morello-specific instructions
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2020 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30 #include "cpu-aarch64.h"
31
32 #ifdef OBJ_ELF
33 #include "elf/aarch64.h"
34 #include "dw2gencfi.h"
35 #endif
36
37 #include "dwarf2dbg.h"
38
39 /* Types of processor to assemble for. */
40 #ifndef CPU_DEFAULT
41 #define CPU_DEFAULT AARCH64_ARCH_V8
42 #endif
43
44 #define streq(a, b) (strcmp (a, b) == 0)
45
46 #define END_OF_INSN '\0'
47
48 #define MAP_CUR_INSN (AARCH64_CPU_HAS_FEATURE (cpu_variant, \
49 AARCH64_FEATURE_C64) \
50 ? MAP_C64 : MAP_INSN)
51
52 #define IS_C64 (AARCH64_CPU_HAS_FEATURE (cpu_variant, AARCH64_FEATURE_C64) \
53 ? 1 : 0)
54
55 static aarch64_feature_set cpu_variant;
56
57 /* Variables that we set while parsing command-line options. Once all
58 options have been read we re-process these values to set the real
59 assembly flags. */
60 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
61 static const aarch64_feature_set *march_cpu_opt = NULL;
62
63 /* Constants for known architecture features. */
64 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
65
66 /* Currently active instruction sequence. */
67 static aarch64_instr_sequence *insn_sequence = NULL;
68
69 #ifdef OBJ_ELF
70 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
71 static symbolS *GOT_symbol;
72
/* Which ABI to use.  */
enum aarch64_abi_type
{
  AARCH64_ABI_NONE = 0,		/* ABI not yet selected.  */
  AARCH64_ABI_LP64 = 1,		/* 64-bit long and pointers.  */
  AARCH64_ABI_ILP32 = 2,	/* 32-bit int, long and pointers.  */
  AARCH64_ABI_PURECAP = 3,	/* Morello pure-capability ABI.  */
  AARCH64_ABI_HYBRID = 4,	/* Morello hybrid ABI.  */
};
82
83 #ifndef DEFAULT_ARCH
84 #define DEFAULT_ARCH "aarch64"
85 #endif
86
87 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
88 static const char *default_arch = DEFAULT_ARCH;
89
90 /* AArch64 ABI for the output file. */
91 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
92
93 /* When non-zero, program to a 32-bit model, in which the C data types
94 int, long and all pointer types are 32-bit objects (ILP32); or to a
95 64-bit model, in which the C int type is 32-bits but the C long type
96 and all pointer types are 64-bit objects (LP64). */
97 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
98 #endif
99
/* Element type parsed from a vector/SVE register suffix such as ".4s";
   sizes per element are fixed by parse_vector_type_for_operand.  */
enum vector_el_type
{
  NT_invtype = -1,	/* No/invalid element type.  */
  NT_b,			/* 8-bit elements.  */
  NT_h,			/* 16-bit elements.  */
  NT_s,			/* 32-bit elements.  */
  NT_d,			/* 64-bit elements.  */
  NT_q,			/* 128-bit elements.  */
  NT_zero,		/* SVE "/z" (zeroing) predication suffix.  */
  NT_merge		/* SVE "/m" (merging) predication suffix.  */
};
111
/* Bits for DEFINED field in vector_type_el.  */
#define NTA_HASTYPE 1 /* An element type was parsed.  */
#define NTA_HASINDEX 2 /* An element index was parsed (or is required).  */
#define NTA_HASVARWIDTH 4 /* SVE-style variable width (no element count).  */

/* Shape and element-index information parsed from a typed register
   operand (e.g. "v0.4s[2]").  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type (NT_*).  */
  unsigned char defined;	/* Mask of the NTA_HAS* bits that are valid.  */
  unsigned width;		/* Element count; 0 when variable/unspecified.  */
  int64_t index;		/* Element index, valid when NTA_HASINDEX set.  */
};
124
#define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001 /* An explicit shift was written in the source — TODO confirm use site.  */
#define FIXUP_F_C64 0x00000002 /* Fixup generated in C64 (Morello) mode — TODO confirm.  */

/* Relocation/fixup details recorded for the instruction being assembled.  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code.  */
  expressionS exp;			/* Expression the fixup resolves.  */
  int pc_rel;				/* Non-zero for PC-relative fixups.  */
  enum aarch64_opnd opnd;		/* Operand the relocation applies to.  */
  uint32_t flags;			/* FIXUP_F_* flags.  */
  unsigned need_libopcodes_p : 1;	/* Whether libopcodes must re-encode at fixup time — TODO confirm.  */
};
137
/* Everything the assembler tracks for the instruction currently being
   parsed and encoded (see the global `inst' below).  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
  {
    enum aarch64_operand_error_kind kind;
    const char *error;
  } parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};
155
156 typedef struct aarch64_instruction aarch64_instruction;
157
158 static aarch64_instruction inst;
159
160 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
161 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
162
163 #ifdef OBJ_ELF
164 # define now_instr_sequence seg_info \
165 (now_seg)->tc_segment_info_data.insn_sequence
166 #else
167 static struct aarch64_instr_sequence now_instr_sequence;
168 #endif
169
170 /* Diagnostics inline function utilities.
171
172 These are lightweight utilities which should only be called by parse_operands
173 and other parsers. GAS processes each assembly line by parsing it against
174 instruction template(s), in the case of multiple templates (for the same
175 mnemonic name), those templates are tried one by one until one succeeds or
176 all fail. An assembly line may fail a few templates before being
177 successfully parsed; an error saved here in most cases is not a user error
178 but an error indicating the current template is not the right template.
179 Therefore it is very important that errors can be saved at a low cost during
180 the parsing; we don't want to slow down the whole parsing by recording
181 non-user errors in detail.
182
183 Remember that the objective is to help GAS pick up the most appropriate
184 error message in the case of multiple templates, e.g. FMOV which has 8
185 templates. */
186
187 static inline void
188 clear_error (void)
189 {
190 inst.parsing_error.kind = AARCH64_OPDE_NIL;
191 inst.parsing_error.error = NULL;
192 }
193
194 static inline bfd_boolean
195 error_p (void)
196 {
197 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
198 }
199
200 static inline const char *
201 get_error_message (void)
202 {
203 return inst.parsing_error.error;
204 }
205
206 static inline enum aarch64_operand_error_kind
207 get_error_kind (void)
208 {
209 return inst.parsing_error.kind;
210 }
211
212 static inline void
213 set_error (enum aarch64_operand_error_kind kind, const char *error)
214 {
215 inst.parsing_error.kind = kind;
216 inst.parsing_error.error = error;
217 }
218
219 static inline void
220 set_recoverable_error (const char *error)
221 {
222 set_error (AARCH64_OPDE_RECOVERABLE, error);
223 }
224
225 /* Use the DESC field of the corresponding aarch64_operand entry to compose
226 the error message. */
227 static inline void
228 set_default_error (void)
229 {
230 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
231 }
232
233 static inline void
234 set_syntax_error (const char *error)
235 {
236 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
237 }
238
239 static inline void
240 set_first_syntax_error (const char *error)
241 {
242 if (! error_p ())
243 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
244 }
245
246 static inline void
247 set_fatal_syntax_error (const char *error)
248 {
249 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
250 }
251 \f
252 /* Return value for certain parsers when the parsing fails; those parsers
253 return the information of the parsed result, e.g. register number, on
254 success. */
255 #define PARSE_FAIL -1
256
257 /* This is an invalid condition code that means no conditional field is
258 present. */
259 #define COND_ALWAYS 0x10
260
/* Association of a textual name with a 32-bit value; presumably the
   entries hashed into aarch64_nzcv_hsh for NZCV flag-set operands —
   TODO confirm against the table initialization.  */
typedef struct
{
  const char *template;
  uint32_t value;
} asm_nzcv;
266
/* Association of a relocation-operator name with its BFD reloc code.  */
struct reloc_entry
{
  char *name;
  bfd_reloc_code_real_type reloc;
};
272
273 /* Macros to define the register types and masks for the purpose
274 of parsing. */
275
276 #undef AARCH64_REG_TYPES
277 #define AARCH64_REG_TYPES \
278 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
279 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
280 BASIC_REG_TYPE(SP_32) /* wsp */ \
281 BASIC_REG_TYPE(SP_64) /* sp */ \
282 BASIC_REG_TYPE(Z_32) /* wzr */ \
283 BASIC_REG_TYPE(Z_64) /* xzr */ \
284 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
285 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
286 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
287 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
288 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
289 BASIC_REG_TYPE(CA_N) /* c[0-30] */ \
290 BASIC_REG_TYPE(CA_SP) /* csp */ \
291 BASIC_REG_TYPE(CA_Z) /* czr */ \
292 BASIC_REG_TYPE(CA_D) /* ddc */ \
293 BASIC_REG_TYPE(VN) /* v[0-31] */ \
294 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
295 BASIC_REG_TYPE(PN) /* p[0-15] */ \
296 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
297 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
298 /* Typecheck: same, plus SVE registers. */ \
299 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
300 | REG_TYPE(ZN)) \
301 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
302 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
303 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
304 /* Typecheck: same, plus SVE registers. */ \
305 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
306 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
307 | REG_TYPE(ZN)) \
308 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
309 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
310 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
311 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
312 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
313 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
314 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
315 /* Typecheck: any [BHSDQ]P FP. */ \
316 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
317 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
318 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
319 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
320 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
321 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
322 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
323 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
324 be used for SVE instructions, since Zn and Pn are valid symbols \
325 in other contexts. */ \
326 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
327 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
328 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
329 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
330 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
331 | REG_TYPE(ZN) | REG_TYPE(PN)) \
332 /* Any integer register; used for error messages only. */ \
333 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
334 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
335 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
336 /* Typecheck: any capability register (inc CSP) */ \
337 MULTI_REG_TYPE(CA_N_SP, REG_TYPE(CA_N) | REG_TYPE(CA_SP)) \
338 MULTI_REG_TYPE(CA_N_Z, REG_TYPE(CA_N) | REG_TYPE(CA_Z)) \
339 /* Pseudo type to mark the end of the enumerator sequence. */ \
340 BASIC_REG_TYPE(MAX)
341
342 #undef BASIC_REG_TYPE
343 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
344 #undef MULTI_REG_TYPE
345 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
346
347 /* Register type enumerators. */
348 typedef enum aarch64_reg_type_
349 {
350 /* A list of REG_TYPE_*. */
351 AARCH64_REG_TYPES
352 } aarch64_reg_type;
353
354 #undef BASIC_REG_TYPE
355 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
356 #undef REG_TYPE
357 #define REG_TYPE(T) (1 << REG_TYPE_##T)
358 #undef MULTI_REG_TYPE
359 #define MULTI_REG_TYPE(T,V) V,
360
/* Structure for a hash table entry for a register.  */
typedef struct
{
  const char *name;		/* Register name as written in assembly.  */
  unsigned char number;		/* Register number.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;	/* REG_TYPE_* classification.  */
  unsigned char builtin;	/* Non-zero for predefined registers — TODO confirm.  */
} reg_entry;
369
370 /* Values indexed by aarch64_reg_type to assist the type checking. */
371 static const unsigned reg_type_masks[] =
372 {
373 AARCH64_REG_TYPES
374 };
375
376 #undef BASIC_REG_TYPE
377 #undef REG_TYPE
378 #undef MULTI_REG_TYPE
379 #undef AARCH64_REG_TYPES
380
381 /* Diagnostics used when we don't get a register of the expected type.
382 Note: this has to synchronized with aarch64_reg_type definitions
383 above. */
384 static const char *
385 get_reg_expected_msg (aarch64_reg_type reg_type)
386 {
387 const char *msg;
388
389 switch (reg_type)
390 {
391 case REG_TYPE_R_32:
392 msg = N_("integer 32-bit register expected");
393 break;
394 case REG_TYPE_R_64:
395 msg = N_("integer 64-bit register expected");
396 break;
397 case REG_TYPE_R_N:
398 msg = N_("integer register expected");
399 break;
400 case REG_TYPE_R64_SP:
401 msg = N_("64-bit integer or SP register expected");
402 break;
403 case REG_TYPE_SVE_BASE:
404 msg = N_("base register expected");
405 break;
406 case REG_TYPE_R_Z:
407 msg = N_("integer or zero register expected");
408 break;
409 case REG_TYPE_SVE_OFFSET:
410 msg = N_("offset register expected");
411 break;
412 case REG_TYPE_R_SP:
413 msg = N_("integer or SP register expected");
414 break;
415 case REG_TYPE_R_Z_SP:
416 msg = N_("integer, zero or SP register expected");
417 break;
418 case REG_TYPE_FP_B:
419 msg = N_("8-bit SIMD scalar register expected");
420 break;
421 case REG_TYPE_FP_H:
422 msg = N_("16-bit SIMD scalar or floating-point half precision "
423 "register expected");
424 break;
425 case REG_TYPE_FP_S:
426 msg = N_("32-bit SIMD scalar or floating-point single precision "
427 "register expected");
428 break;
429 case REG_TYPE_FP_D:
430 msg = N_("64-bit SIMD scalar or floating-point double precision "
431 "register expected");
432 break;
433 case REG_TYPE_FP_Q:
434 msg = N_("128-bit SIMD scalar or floating-point quad precision "
435 "register expected");
436 break;
437 case REG_TYPE_R_Z_BHSDQ_V:
438 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
439 msg = N_("register expected");
440 break;
441 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
442 msg = N_("SIMD scalar or floating-point register expected");
443 break;
444 case REG_TYPE_VN: /* any V reg */
445 msg = N_("vector register expected");
446 break;
447 case REG_TYPE_ZN:
448 msg = N_("SVE vector register expected");
449 break;
450 case REG_TYPE_PN:
451 msg = N_("SVE predicate register expected");
452 break;
453 case REG_TYPE_CA_N:
454 msg = N_("Capability register C0 - C30 expected");
455 break;
456 case REG_TYPE_CA_SP:
457 msg = N_("Capability register CSP expected");
458 break;
459 case REG_TYPE_CA_N_SP:
460 msg = N_("Capability register C0 - C30 or CSP expected");
461 break;
462 case REG_TYPE_CA_Z:
463 msg = N_("Capability register CZR expected");
464 break;
465 default:
466 as_fatal (_("invalid register type %d"), reg_type);
467 }
468 return msg;
469 }
470
471 /* Some well known registers that we refer to directly elsewhere. */
472 #define REG_SP 31
473 #define REG_ZR 31
474 #define REG_DW_CSP (31 + 198)
475 #define REG_DW_CLR (30 + 198)
476
477 /* Instructions take 4 bytes in the object file. */
478 #define INSN_SIZE 4
479
480 static htab_t aarch64_ops_hsh;
481 static htab_t aarch64_cond_hsh;
482 static htab_t aarch64_shift_hsh;
483 static htab_t aarch64_sys_regs_hsh;
484 static htab_t aarch64_pstatefield_hsh;
485 static htab_t aarch64_sys_regs_ic_hsh;
486 static htab_t aarch64_sys_regs_dc_hsh;
487 static htab_t aarch64_sys_regs_at_hsh;
488 static htab_t aarch64_sys_regs_tlbi_hsh;
489 static htab_t aarch64_sys_regs_sr_hsh;
490 static htab_t aarch64_reg_hsh;
491 static htab_t aarch64_barrier_opt_hsh;
492 static htab_t aarch64_nzcv_hsh;
493 static htab_t aarch64_pldop_hsh;
494 static htab_t aarch64_hint_opt_hsh;
495
496 /* Stuff needed to resolve the label ambiguity
497 As:
498 ...
499 label: <insn>
500 may differ from:
501 ...
502 label:
503 <insn> */
504
505 static symbolS *last_label_seen;
506
507 /* Literal pool structure. Held on a per-section
508 and per-sub-section basis. */
509
510 #define MAX_LITERAL_POOL_SIZE 1024
/* One entry of a literal pool: the expression plus, for O_big values,
   a private copy of the bignum digits.  */
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;
517
/* One literal pool, held per (section, sub-section) pair; pools are
   chained through NEXT starting at list_of_pools.  */
typedef struct literal_pool
{
  literal_expression literals[MAX_LITERAL_POOL_SIZE];
  unsigned int next_free_entry;	/* Index of the first unused slot.  */
  unsigned int id;		/* Pool identifier — presumably used for the pool label; TODO confirm.  */
  symbolS *symbol;		/* Symbol through which the pool is addressed.  */
  segT section;			/* Owning section.  */
  subsegT sub_section;		/* Owning sub-section.  */
  int size;			/* Entry size — TODO confirm units at use sites.  */
  struct literal_pool *next;	/* Next pool in list_of_pools.  */
} literal_pool;
529
530 /* Pointer to a linked list of literal pools. */
531 static literal_pool *list_of_pools = NULL;
532 \f
533 /* Pure syntax. */
534
535 /* This array holds the chars that always start a comment. If the
536 pre-processor is disabled, these aren't very useful. */
537 const char comment_chars[] = "";
538
539 /* This array holds the chars that only start a comment at the beginning of
540 a line. If the line seems to have the form '# 123 filename'
541 .line and .file directives will appear in the pre-processed output. */
542 /* Note that input_file.c hand checks for '#' at the beginning of the
543 first line of the input file. This is because the compiler outputs
544 #NO_APP at the beginning of its output. */
545 /* Also note that comments like this one will always work. */
546 const char line_comment_chars[] = "#";
547
548 const char line_separator_chars[] = ";";
549
550 /* Chars that can be used to separate mant
551 from exp in floating point numbers. */
552 const char EXP_CHARS[] = "eE";
553
554 /* Chars that mean this number is a floating point constant. */
555 /* As in 0f12.456 */
556 /* or 0d1.2345e12 */
557
558 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhH";
559
560 /* Prefix character that indicates the start of an immediate value. */
561 #define is_immediate_prefix(C) ((C) == '#')
562
563 /* Separator character handling. */
564
565 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
566
567 static inline bfd_boolean
568 skip_past_char (char **str, char c)
569 {
570 if (**str == c)
571 {
572 (*str)++;
573 return TRUE;
574 }
575 else
576 return FALSE;
577 }
578
579 #define skip_past_comma(str) skip_past_char (str, ',')
580
581 /* Arithmetic expressions (possibly involving symbols). */
582
583 static bfd_boolean in_my_get_expression_p = FALSE;
584
585 /* Third argument to my_get_expression. */
586 #define GE_NO_PREFIX 0
587 #define GE_OPT_PREFIX 1
588
/* Return TRUE if the string pointed by *STR is successfully parsed
   as an valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   PREFIX_MODE selects whether a leading '#' immediate prefix is
   disallowed (GE_NO_PREFIX) or optional (GE_OPT_PREFIX).  When
   REJECT_ABSENT is non-zero, an O_absent result is also treated as a
   failure.  */

static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Run the generic expression parser over *STR; while it runs,
     in_my_get_expression_p lets md_operand mark bad operands for us
     by setting X_op to O_illegal.  input_line_pointer is saved and
     restored around the call on every path below.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
655
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* If this is a bfloat16 type, then parse it slightly differently -
     as it does not follow the IEEE standard exactly.  */
  if (type == 'b')
    {
      char * t;
      LITTLENUM_TYPE words[MAX_LITTLENUMS];
      FLONUM_TYPE generic_float;

      /* Parse via the generic IEEE machinery; the (1, 8) arguments
	 presumably select bfloat16's precision/exponent layout — TODO
	 confirm against atof_ieee_detail's contract.  */
      t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float);

      if (t)
	input_line_pointer = t;
      else
	return _("invalid floating point number");

      /* Override the parsed encoding for the special values that
	 bfloat16 encodes explicitly here.  */
      switch (generic_float.sign)
	{
	/* Is +Inf.  */
	case 'P':
	  words[0] = 0x7f80;
	  break;

	/* Is -Inf.  */
	case 'N':
	  words[0] = 0xff80;
	  break;

	/* Is NaN.  */
	/* bfloat16 has two types of NaN - quiet and signalling.
	   Quiet NaN has bit[6] == 1 && faction != 0, whereas
	   signalling Nan's have bit[0] == 0 && fraction != 0.
	   Chose this specific encoding as it is the same form
	   as used by other IEEE 754 encodings in GAS.  */
	case 0:
	  words[0] = 0x7fff;
	  break;

	default:
	  /* Ordinary finite value: keep the parsed encoding.  */
	  break;
	}

      /* A bfloat16 constant is a single 16-bit LITTLENUM.  */
      *sizeP = 2;

      md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));

      return NULL;
    }

  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
714
715 /* We handle all bad expressions here, so that we can report the faulty
716 instruction in the error message. */
717 void
718 md_operand (expressionS * exp)
719 {
720 if (in_my_get_expression_p)
721 exp->X_op = O_illegal;
722 }
723
724 /* Immediate values. */
725
726 /* Errors may be set multiple times during parsing or bit encoding
727 (particularly in the Neon bits), but usually the earliest error which is set
728 will be the most meaningful. Avoid overwriting it with later (cascading)
729 errors by calling this function. */
730
/* Record ERROR unless an earlier diagnostic has already been captured
   for this line.  */
static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
737
/* Similar to first_error, but this function accepts formatted error
   message.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* Callers are expected to pass messages that fit the buffer;
	 truncation (ret >= size) would trip this assertion.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
762
763 /* Register parsing. */
764
765 /* Generic register parser which is called by other specialized
766 register parsers.
767 CCP points to what should be the beginning of a register name.
768 If it is indeed a valid register name, advance CCP over it and
769 return the reg_entry structure; otherwise return NULL.
770 It does not issue diagnostics. */
771
772 static reg_entry *
773 parse_reg (char **ccp)
774 {
775 char *start = *ccp;
776 char *p;
777 reg_entry *reg;
778
779 #ifdef REGISTER_PREFIX
780 if (*start != REGISTER_PREFIX)
781 return NULL;
782 start++;
783 #endif
784
785 p = start;
786 if (!ISALPHA (*p) || !is_name_beginner (*p))
787 return NULL;
788
789 do
790 p++;
791 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
792
793 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
794
795 if (!reg)
796 return NULL;
797
798 *ccp = p;
799 return reg;
800 }
801
802 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
803 return FALSE. */
804 static bfd_boolean
805 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
806 {
807 return (reg_type_masks[type] & (1 << reg->type)) != 0;
808 }
809
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit forms: Wn, WSP and WZR.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit forms: Xn, SP and XZR.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* An SVE Zn base/offset must be permitted by REG_TYPE and must
	 carry an explicit ".s" or ".d" element-size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the ".s"/".d" suffix.  */
      str += 2;
      break;

    /* Morello capability registers: Cn and CSP.  */
    case REG_TYPE_CA_N:
    case REG_TYPE_CA_SP:
      *qualifier = AARCH64_OPND_QLF_CA;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
872
873 /* Try to parse a base or offset register. Return the register entry
874 on success, setting *QUALIFIER to the register qualifier. Return null
875 otherwise.
876
877 Note that this function does not issue any diagnostics. */
878
879 static const reg_entry *
880 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
881 {
882 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
883 }
884
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE Zn/Pn qualifiers and bare [bhsdq] suffixes carry no element
     count; represent that as width 0.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are only accepted for SVE registers or the
	 single-element "1q" form.  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* Only 64-bit and 128-bit total vector sizes are valid, except for
     the short 2h and 4b forms.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
970
971 /* *STR contains an SVE zero/merge predication suffix. Parse it into
972 *PARSED_TYPE and point *STR at the end of the suffix. */
973
974 static bfd_boolean
975 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
976 {
977 char *ptr = *str;
978
979 /* Skip '/'. */
980 gas_assert (*ptr == '/');
981 ptr++;
982 switch (TOLOWER (*ptr))
983 {
984 case 'z':
985 parsed_type->type = NT_zero;
986 break;
987 case 'm':
988 parsed_type->type = NT_merge;
989 break;
990 default:
991 if (*ptr != '\0' && *ptr != ',')
992 first_error_fmt (_("unexpected character `%c' in predication type"),
993 *ptr);
994 else
995 first_error (_("missing predication type"));
996 return FALSE;
997 }
998 parsed_type->width = 0;
999 *str = ptr + 1;
1000 return TRUE;
1001 }
1002
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start from an "untyped, unindexed" description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the concrete type of the register we found.  */
  type = reg->type;

  /* Parse an optional ".<shape>" suffix (V/Z/P registers), or a "/z"
     or "/m" predication suffix (P registers only).  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register if of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* Parse an optional "[<index>]" element index.  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      /* NOTE: only records the error; the register number is still
	 returned below (matches the historical behavior).  */
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1138
1139 /* Parse register.
1140
1141 Return the register number on success; return PARSE_FAIL otherwise.
1142
1143 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1144 the register (e.g. NEON double or quad reg when either has been requested).
1145
1146 If this is a NEON vector register with additional type information, fill
1147 in the struct pointed to by VECTYPE (if non-NULL).
1148
1149 This parser does not handle register list. */
1150
1151 static int
1152 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1153 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1154 {
1155 struct vector_type_el atype;
1156 char *str = *ccp;
1157 int reg = parse_typed_reg (&str, type, rtype, &atype,
1158 /*in_reg_list= */ FALSE);
1159
1160 if (reg == PARSE_FAIL)
1161 return PARSE_FAIL;
1162
1163 if (vectype)
1164 *vectype = atype;
1165
1166 *ccp = str;
1167
1168 return reg;
1169 }
1170
1171 static inline bfd_boolean
1172 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1173 {
1174 return
1175 e1.type == e2.type
1176 && e1.defined == e2.defined
1177 && e1.width == e2.width && e1.index == e2.index;
1178 }
1179
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |     0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  /* A register list must be enclosed in braces.  */
  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  /* Each iteration handles one register (or, for a '-' range, the end
     register of the range started in the previous iteration).  */
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      /* A typed register with an index (Vn.<T>[i]) means a trailing
	 [index] is expected after the closing brace.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* Range end must not be lower than range start.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      /* All registers in the list must share one shape/index.  */
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      /* Pack each register of the (possibly one-element) range into
	 5-bit fields of the encoded result.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  /* Parse the common [index] that follows the closing brace.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  /* The architecture permits at most four registers per list.  */
  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low two bits carry the register count minus one; the register
     numbers occupy successive 5-bit fields above them.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1340
1341 /* Directives: register aliases. */
1342
1343 static reg_entry *
1344 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1345 {
1346 reg_entry *new;
1347 const char *name;
1348
1349 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1350 {
1351 if (new->builtin)
1352 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1353 str);
1354
1355 /* Only warn about a redefinition if it's not defined as the
1356 same register. */
1357 else if (new->number != number || new->type != type)
1358 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1359
1360 return NULL;
1361 }
1362
1363 name = xstrdup (str);
1364 new = XNEW (reg_entry);
1365
1366 new->name = name;
1367 new->number = number;
1368 new->type = type;
1369 new->builtin = FALSE;
1370
1371 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1372
1373 return new;
1374 }
1375
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Return TRUE: the line was a .req, even though a bad one.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      /* Upper-case variant: NBUF is overwritten in place.  */
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the variant if it differs from the name as given.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      /* Lower-case variant, same scheme.  */
      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  /* insert_reg_alias copies the name, so NBUF is always ours to free.  */
  free (nbuf);
  return TRUE;
}
1455
/* Handler for a bare ".req" pseudo-op.  Should never be called, as
   .req goes between the alias and the register name, not at the
   beginning of the line (that form is handled by
   create_register_alias); reaching here means the syntax was wrong.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1463
1464 /* The .unreq directive deletes an alias which was previously defined
1465 by .req. For example:
1466
1467 my_alias .req r11
1468 .unreq my_alias */
1469
1470 static void
1471 s_unreq (int a ATTRIBUTE_UNUSED)
1472 {
1473 char *name;
1474 char saved_char;
1475
1476 name = input_line_pointer;
1477
1478 while (*input_line_pointer != 0
1479 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1480 ++input_line_pointer;
1481
1482 saved_char = *input_line_pointer;
1483 *input_line_pointer = 0;
1484
1485 if (!*name)
1486 as_bad (_("invalid syntax for .unreq directive"));
1487 else
1488 {
1489 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1490
1491 if (!reg)
1492 as_bad (_("unknown register alias '%s'"), name);
1493 else if (reg->builtin)
1494 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1495 name);
1496 else
1497 {
1498 char *p;
1499 char *nbuf;
1500
1501 str_hash_delete (aarch64_reg_hsh, name);
1502 free ((char *) reg->name);
1503 free (reg);
1504
1505 /* Also locate the all upper case and all lower case versions.
1506 Do not complain if we cannot find one or the other as it
1507 was probably deleted above. */
1508
1509 nbuf = strdup (name);
1510 for (p = nbuf; *p; p++)
1511 *p = TOUPPER (*p);
1512 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1513 if (reg)
1514 {
1515 str_hash_delete (aarch64_reg_hsh, nbuf);
1516 free ((char *) reg->name);
1517 free (reg);
1518 }
1519
1520 for (p = nbuf; *p; p++)
1521 *p = TOLOWER (*p);
1522 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1523 if (reg)
1524 {
1525 str_hash_delete (aarch64_reg_hsh, nbuf);
1526 free ((char *) reg->name);
1527 free (reg);
1528 }
1529
1530 free (nbuf);
1531 }
1532 }
1533
1534 *input_line_pointer = saved_char;
1535 demand_empty_rest_of_line ();
1536 }
1537
1538 /* Directives: Instruction set selection. */
1539
1540 #ifdef OBJ_ELF
1541 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1542 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1543 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1544 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1545
/* Create a new mapping symbol for the transition to STATE, at offset
   VALUE within FRAG.  The symbol names follow the AArch64 ELF mapping
   symbol convention: $d for data, $x for A64 code, $c for C64
   (Morello) code.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    case MAP_C64:
      symname = "$c";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Record on the symbol whether it marks C64 code; data symbols
     (MAP_DATA) are left untouched.  */
  if (state == MAP_C64)
    AARCH64_SET_C64 (symbolP, 1);
  else if (state == MAP_INSN)
    AARCH64_SET_C64 (symbolP, 0);

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Replace, not duplicate, a symbol at the same offset.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1610
1611 /* We must sometimes convert a region marked as code to data during
1612 code alignment, if an odd number of bytes have to be padded. The
1613 code mapping symbol is pushed to an aligned address. */
1614
1615 static void
1616 insert_data_mapping_symbol (enum mstate state,
1617 valueT value, fragS * frag, offsetT bytes)
1618 {
1619 /* If there was already a mapping symbol, remove it. */
1620 if (frag->tc_frag_data.last_map != NULL
1621 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1622 frag->fr_address + value)
1623 {
1624 symbolS *symp = frag->tc_frag_data.last_map;
1625
1626 if (value == 0)
1627 {
1628 know (frag->tc_frag_data.first_map == symp);
1629 frag->tc_frag_data.first_map = NULL;
1630 }
1631 frag->tc_frag_data.last_map = NULL;
1632 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1633 }
1634
1635 make_mapping_symbol (MAP_DATA, value, frag);
1636 make_mapping_symbol (state, value + bytes, frag);
1637 }
1638
1639 static void mapping_state_2 (enum mstate state, int max_chars);
1640
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* MAP_CUR_INSN is MAP_C64 when assembling C64 code, MAP_INSN
     otherwise.  */
  if (state == MAP_CUR_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_CUR_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* Mark the bytes emitted before the first instruction as data.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1682
1683 /* Same as mapping_state, but MAX_CHARS bytes have already been
1684 allocated. Put the mapping symbol that far back. */
1685
1686 static void
1687 mapping_state_2 (enum mstate state, int max_chars)
1688 {
1689 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1690
1691 if (!SEG_NORMAL (now_seg))
1692 return;
1693
1694 if (mapstate == state)
1695 /* The mapping symbol has already been emitted.
1696 There is nothing else to do. */
1697 return;
1698
1699 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1700 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1701 }
1702 #else
1703 #define mapping_state(x) /* nothing */
1704 #define mapping_state_2(x, y) /* nothing */
1705 #endif
1706
1707 /* Directives: sectioning and alignment. */
1708
/* Handler for the .bss directive: switch to the BSS section and mark
   subsequent output as data.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1718
/* Handler for the .even directive: align the output to a 2-byte
   boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* 2^1 = 2-byte alignment.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1730
1731 /* Directives: Literal pools. */
1732
1733 static literal_pool *
1734 find_literal_pool (int size)
1735 {
1736 literal_pool *pool;
1737
1738 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1739 {
1740 if (pool->section == now_seg
1741 && pool->sub_section == now_subseg && pool->size == size)
1742 break;
1743 }
1744
1745 return pool;
1746 }
1747
1748 static literal_pool *
1749 find_or_make_literal_pool (int size)
1750 {
1751 /* Next literal pool ID number. */
1752 static unsigned int latest_pool_num = 1;
1753 literal_pool *pool;
1754
1755 pool = find_literal_pool (size);
1756
1757 if (pool == NULL)
1758 {
1759 /* Create a new pool. */
1760 pool = XNEW (literal_pool);
1761 if (!pool)
1762 return NULL;
1763
1764 /* Currently we always put the literal pool in the current text
1765 section. If we were generating "small" model code where we
1766 knew that all code and initialised data was within 1MB then
1767 we could output literals to mergeable, read-only data
1768 sections. */
1769
1770 pool->next_free_entry = 0;
1771 pool->section = now_seg;
1772 pool->sub_section = now_subseg;
1773 pool->size = size;
1774 pool->next = list_of_pools;
1775 pool->symbol = NULL;
1776
1777 /* Add it to the list. */
1778 list_of_pools = pool;
1779 }
1780
1781 /* New pools, and emptied pools, will have a NULL symbol. */
1782 if (pool->symbol == NULL)
1783 {
1784 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1785 &zero_address_frag, 0);
1786 pool->id = latest_pool_num++;
1787 }
1788
1789 /* Done. */
1790 return pool;
1791 }
1792
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   On success, *EXP is rewritten into an O_symbol reference to the pool
   entry (symbol = pool label, addend = byte offset within the pool).
   Return TRUE on success, otherwise return FALSE.  */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants are duplicates when value and signedness match.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions must match symbol, op-symbol and addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Redirect the expression at the pool entry; the entry's offset is
     its index times the pool's element size.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1852
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Fill in a previously created symbol SYMBOLP with NAME, SEGMENT,
   value VALU and owning fragment FRAG, then append it to the global
   symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the caller may reuse it.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1903
1904
/* Handler for the .ltorg/.pool directives: dump every non-empty
   literal pool (one per entry size) for the current section into the
   output stream, then mark the pools as empty.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Pools exist for entry sizes 4, 8 and 16 bytes (align 2..4).  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* The pool contents are data, not instructions.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* \002 in the name keeps it out of the user's namespace.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Give the pool's placeholder symbol its final address.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1963
1964 #ifdef OBJ_ELF
1965 /* Forward declarations for functions below, in the MD interface
1966 section. */
1967 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1968 static struct reloc_table_entry * find_reloc_table_entry (char **);
1969
1970 /* Directives: Data. */
1971 /* N.B. the support for relocation suffix in this directive needs to be
1972 implemented properly. */
1973
/* Handler for the data directives .word/.long (NBYTES == 4) and
   .xword/.dword (NBYTES == 8): emit a comma-separated list of
   NBYTES-wide values, rejecting (as yet unimplemented) relocation
   suffixes of the form sym:reloc:.  */
static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional relocation suffix (":name:") after
	     the symbol; "#" before it is tolerated and skipped.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2025
2026 /* Mark symbol that it follows a variant PCS convention. */
2027
2028 static void
2029 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
2030 {
2031 char *name;
2032 char c;
2033 symbolS *sym;
2034 asymbol *bfdsym;
2035 elf_symbol_type *elfsym;
2036
2037 c = get_symbol_name (&name);
2038 if (!*name)
2039 as_bad (_("Missing symbol name in directive"));
2040 sym = symbol_find_or_make (name);
2041 restore_line_pointer (c);
2042 demand_empty_rest_of_line ();
2043 bfdsym = symbol_get_bfdsym (sym);
2044 elfsym = elf_symbol_from (bfdsym);
2045 gas_assert (elfsym);
2046 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2047 }
2048 #endif /* OBJ_ELF */
2049
/* Handler for the .inst directive: output each comma-separated
   constant as a 32-bit word, but mark it as an instruction (code
   mapping state, instruction byte order).  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_CUR_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  /* $x or $c depending on whether we are assembling C64 code.  */
  mapping_state (MAP_CUR_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* AArch64 instructions are always little-endian; byte-swap the
	 value on a big-endian target so emit_expr writes it out in
	 instruction order.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2102
/* Handler for the .cfi_b_key_frame directive: record on the current
   FDE that return addresses in this frame are signed with the
   pointer-authentication B key.  */
static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->entry_extras.pauth_key = AARCH64_PAUTH_KEY_B;
}
2110
2111 #ifdef OBJ_ELF
2112 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2113
2114 static void
2115 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2116 {
2117 expressionS exp;
2118
2119 expression (&exp);
2120 frag_grow (4);
2121 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2122 BFD_RELOC_AARCH64_TLSDESC_ADD);
2123
2124 demand_empty_rest_of_line ();
2125 }
2126
2127 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2128
2129 static void
2130 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2131 {
2132 expressionS exp;
2133
2134 /* Since we're just labelling the code, there's no need to define a
2135 mapping symbol. */
2136 expression (&exp);
2137 /* Make sure there is enough room in this frag for the following
2138 blr. This trick only works if the blr follows immediately after
2139 the .tlsdesc directive. */
2140 frag_grow (4);
2141 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2142 (IS_C64 ? BFD_RELOC_MORELLO_TLSDESC_CALL
2143 : BFD_RELOC_AARCH64_TLSDESC_CALL));
2144
2145 demand_empty_rest_of_line ();
2146 }
2147
2148 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2149
2150 static void
2151 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2152 {
2153 expressionS exp;
2154
2155 expression (&exp);
2156 frag_grow (4);
2157 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2158 BFD_RELOC_AARCH64_TLSDESC_LDR);
2159
2160 demand_empty_rest_of_line ();
2161 }
2162
/* Handler for the .capinit directive: place a 16-byte
   BFD_RELOC_MORELLO_CAPINIT capability-initialisation relocation at
   the current (16-byte-aligned) location.  Unlike .chericap, this
   does not itself emit the 16 data bytes.  */
static void
s_aarch64_capinit (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  expression (&exp);

  /* align to 16 bytes (2^4).  */
  do_align (4, (char *) NULL, 0, 0);

  frag_grow (16);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 16, &exp, 0,
		   BFD_RELOC_MORELLO_CAPINIT);

  demand_empty_rest_of_line ();
}
2178
/* Handler for the .chericap directive: emit a 16-byte, 16-byte-aligned
   zero-filled capability slot covered by a BFD_RELOC_MORELLO_CAPINIT
   relocation.  */
static void
s_aarch64_chericap (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  expression (&exp);

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* align to 16 bytes (2^4).  */
  do_align (4, (char *) NULL, 0, 0);

  frag_grow (16);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 16, &exp, 0,
		   BFD_RELOC_MORELLO_CAPINIT);

  /* The capability bytes themselves are data.  */
  mapping_state (MAP_DATA);
  int i;
  for (i = 0; i < 4; i++)
    {
      /* The documentation of our md_number_to_chars says the greatest value
	 size it can handle is 4 bytes.  */
      char *p = frag_more (4);
      md_number_to_chars (p, 0, 4);
    }
  demand_empty_rest_of_line ();
}
2207 #endif /* OBJ_ELF */
2208
/* Forward declarations for the directive handlers defined later in
   this file.  */
static void s_aarch64_arch (int);
static void s_aarch64_cpu (int);
static void s_aarch64_arch_extension (int);

/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* .pool is an alias for .ltorg.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The integer argument is the data width in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
  {"capinit", s_aarch64_capinit, 0},
  {"chericap", s_aarch64_chericap, 0},
#endif
  /* The char argument selects the float format for float_cons.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2248 \f
2249
2250 /* Check whether STR points to a register name followed by a comma or the
2251 end of line; REG_TYPE indicates which register types are checked
2252 against. Return TRUE if STR is such a register name; otherwise return
2253 FALSE. The function does not intend to produce any diagnostics, but since
2254 the register parser aarch64_reg_parse, which is called by this function,
2255 does produce diagnostics, we call clear_error to clear any diagnostics
2256 that may be generated by aarch64_reg_parse.
2257 Also, the function returns FALSE directly if there is any user error
2258 present at the function entry. This prevents the existing diagnostics
2259 state from being spoiled.
2260 The function currently serves parse_constant_immediate and
2261 parse_big_immediate only. */
2262 static bfd_boolean
2263 reg_name_p (char *str, aarch64_reg_type reg_type)
2264 {
2265 int reg;
2266
2267 /* Prevent the diagnostics state from being spoiled. */
2268 if (error_p ())
2269 return FALSE;
2270
2271 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2272
2273 /* Clear the parsing error that may be set by the reg parser. */
2274 clear_error ();
2275
2276 if (reg == PARSE_FAIL)
2277 return FALSE;
2278
2279 skip_whitespace (str);
2280 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2281 return TRUE;
2282
2283 return FALSE;
2284 }
2285
2286 /* Parser functions used exclusively in instruction operands. */
2287
2288 /* Parse an immediate expression which may not be constant.
2289
2290 To prevent the expression parser from pushing a register name
2291 into the symbol table as an undefined symbol, firstly a check is
2292 done to find out whether STR is a register of type REG_TYPE followed
2293 by a comma or the end of line. Return FALSE if STR is such a string. */
2294
2295 static bfd_boolean
2296 parse_immediate_expression (char **str, expressionS *exp,
2297 aarch64_reg_type reg_type)
2298 {
2299 if (reg_name_p (*str, reg_type))
2300 {
2301 set_recoverable_error (_("immediate operand required"));
2302 return FALSE;
2303 }
2304
2305 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2306
2307 if (exp->X_op == O_absent)
2308 {
2309 set_fatal_syntax_error (_("missing immediate expression"));
2310 return FALSE;
2311 }
2312
2313 return TRUE;
2314 }
2315
2316 /* Constant immediate-value read function for use in insn parsing.
2317 STR points to the beginning of the immediate (with the optional
2318 leading #); *VAL receives the value. REG_TYPE says which register
2319 names should be treated as registers rather than as symbolic immediates.
2320
2321 Return TRUE on success; otherwise return FALSE. */
2322
2323 static bfd_boolean
2324 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2325 {
2326 expressionS exp;
2327
2328 if (! parse_immediate_expression (str, &exp, reg_type))
2329 return FALSE;
2330
2331 if (exp.X_op != O_constant)
2332 {
2333 set_syntax_error (_("constant expression required"));
2334 return FALSE;
2335 }
2336
2337 *val = exp.X_add_number;
2338 return TRUE;
2339 }
2340
/* Pack the sign bit and the top exponent/fraction bits of the 32-bit
   IEEE754 single-precision pattern IMM into the AArch64 8-bit
   floating-point immediate encoding and return it.  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;		/* b[25:19] -> b[6:0] */
  uint32_t sign = (imm >> (31 - 7)) & 0x80;	/* b[31] -> b[7] */

  return sign | low7;
}
2347
2348 /* Return TRUE if the single-precision floating-point value encoded in IMM
2349 can be expressed in the AArch64 8-bit signed floating-point format with
2350 3-bit exponent and normalized 4 bits of precision; in other words, the
2351 floating-point value must be expressable as
2352 (+/-) n / 16 * power (2, r)
2353 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2354
2355 static bfd_boolean
2356 aarch64_imm_float_p (uint32_t imm)
2357 {
2358 /* If a single-precision floating-point value has the following bit
2359 pattern, it can be expressed in the AArch64 8-bit floating-point
2360 format:
2361
2362 3 32222222 2221111111111
2363 1 09876543 21098765432109876543210
2364 n Eeeeeexx xxxx0000000000000000000
2365
2366 where n, e and each x are either 0 or 1 independently, with
2367 E == ~ e. */
2368
2369 uint32_t pattern;
2370
2371 /* Prepare the pattern for 'Eeeeee'. */
2372 if (((imm >> 30) & 0x1) == 0)
2373 pattern = 0x3e000000;
2374 else
2375 pattern = 0x40000000;
2376
2377 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2378 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2379 }
2380
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.  */

static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s; otherwise the trailing significand bits
     would not survive the narrowing.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'.  The exponent must be within
     single-precision range, so bits 59-62 (E~~~) must equal the
     complement-extended form of bit 62.  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111: that exponent maps to Inf/NaN in
     single precision.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  /* Reassemble the 32-bit single-precision pattern.  */
  *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));			/* 3 S bits.  */
  return TRUE;
}
2428
2429 /* Return true if we should treat OPERAND as a double-precision
2430 floating-point operand rather than a single-precision one. */
2431 static bfd_boolean
2432 double_precision_operand_p (const aarch64_opnd_info *operand)
2433 {
2434 /* Check for unsuffixed SVE registers, which are allowed
2435 for LDR and STR but not in instructions that require an
2436 immediate. We get better error messages if we arbitrarily
2437 pick one size, parse the immediate normally, and then
2438 report the match failure in the normal way. */
2439 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2440 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2441 }
2442
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  /* Skip the optional leading '#'.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit pattern to 32 bits; reject it if any
	     precision would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision pattern must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else if (reg_name_p (str, reg_type))
    {
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal form: let the generic IEEE reader parse it directly
	 into single-precision littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2518
2519 /* Less-generic immediate-value read function with the possibility of loading
2520 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2521 instructions.
2522
2523 To prevent the expression parser from pushing a register name into the
2524 symbol table as an undefined symbol, a check is firstly done to find
2525 out whether STR is a register of type REG_TYPE followed by a comma or
2526 the end of line. Return FALSE if STR is such a register. */
2527
2528 static bfd_boolean
2529 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2530 {
2531 char *ptr = *str;
2532
2533 if (reg_name_p (ptr, reg_type))
2534 {
2535 set_syntax_error (_("immediate operand required"));
2536 return FALSE;
2537 }
2538
2539 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2540
2541 if (inst.reloc.exp.X_op == O_constant)
2542 *imm = inst.reloc.exp.X_add_number;
2543
2544 *str = ptr;
2545
2546 return TRUE;
2547 }
2548
2549 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2550 if NEED_LIBOPCODES is non-zero, the fixup will need
2551 assistance from the libopcodes. */
2552
2553 static inline void
2554 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2555 const aarch64_opnd_info *operand,
2556 int need_libopcodes_p)
2557 {
2558 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2559 reloc->opnd = operand->type;
2560 if (need_libopcodes_p)
2561 reloc->need_libopcodes_p = 1;
2562 };
2563
/* Return TRUE if the instruction needs to be fixed up later internally by
   the GAS; otherwise return FALSE.  The marker relocation type checked
   here is the one set by aarch64_set_gas_internal_fixup.  */

static inline bfd_boolean
aarch64_gas_internal_fixup_p (void)
{
  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
2572
2573 /* Assign the immediate value to the relevant field in *OPERAND if
2574 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2575 needs an internal fixup in a later stage.
2576 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2577 IMM.VALUE that may get assigned with the constant. */
2578 static inline void
2579 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2580 aarch64_opnd_info *operand,
2581 int addr_off_p,
2582 int need_libopcodes_p,
2583 int skip_p)
2584 {
2585 if (reloc->exp.X_op == O_constant)
2586 {
2587 if (addr_off_p)
2588 operand->addr.offset.imm = reloc->exp.X_add_number;
2589 else
2590 operand->imm.value = reloc->exp.X_add_number;
2591 reloc->type = BFD_RELOC_UNUSED;
2592 }
2593 else
2594 {
2595 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2596 /* Tell libopcodes to ignore this operand or not. This is helpful
2597 when one of the operands needs to be fixed up later but we need
2598 libopcodes to check the other operands. */
2599 operand->skip = skip_p;
2600 }
2601 }
2602
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;		/* Modifier text, without the colons.  */
  int pc_rel;			/* Non-zero if PC-relative.  */
  /* Each field below gives the BFD reloc to emit when the modifier is
     used with the corresponding kind of instruction; 0 means the
     modifier is not valid for that kind.  */
  bfd_reloc_code_real_type adr_type;		/* ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* ADRP.  */
  bfd_reloc_code_real_type c64_adrp_type;	/* ADRP under C64 (Morello).  */
  bfd_reloc_code_real_type movw_type;		/* MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;		/* Load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* LDR (literal).  */
};
2623
/* Table of all relocation modifiers recognised in operands, keyed by
   find_reloc_table_entry.  Field order per entry: name, pc_rel,
   adr_type, adrp_type, c64_adrp_type, movw_type, add_type, ldst_type,
   ld_literal_type (see struct reloc_table_entry).  */
static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   BFD_RELOC_MORELLO_ADR_HI20_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits (20 bits for C64) of pc-relative page offset: ADRP, no
     check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   BFD_RELOC_MORELLO_ADR_HI20_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   BFD_RELOC_MORELLO_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   BFD_RELOC_MORELLO_TLSDESC_ADR_PAGE20,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   BFD_RELOC_MORELLO_TLSIE_ADR_GOTTPREL_PAGE20,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_MORELLO_TLSIE_ADD_LO12,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},

  /* Most significant bits 0-15 of the size of a symbol: MOVZ */
  {"size_g0", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_MORELLO_MOVW_SIZE_G0,
   0,
   0,
   0},

  /* Less significant bits 0-15 of the size of a symbol: MOVK, no check */
  {"size_g0_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_MORELLO_MOVW_SIZE_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of the size of a symbol: MOVZ */
  {"size_g1", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_MORELLO_MOVW_SIZE_G1,
   0,
   0,
   0},

  /* Less significant bits 16-31 of the size of a symbol: MOVK, no check */
  {"size_g1_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_MORELLO_MOVW_SIZE_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of the size of a symbol: MOVZ */
  {"size_g2", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_MORELLO_MOVW_SIZE_G2,
   0,
   0,
   0},

  /* Less significant bits 32-47 of the size of a symbol: MOVK, no check */
  {"size_g2_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_MORELLO_MOVW_SIZE_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of the size of a symbol: MOVZ */
  {"size_g3", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_MORELLO_MOVW_SIZE_G3,
   0,
   0,
   0},

};
3281
3282 /* Given the address of a pointer pointing to the textual name of a
3283 relocation as may appear in assembler source, attempt to find its
3284 details in reloc_table. The pointer will be updated to the character
3285 after the trailing colon. On failure, NULL will be returned;
3286 otherwise return the reloc_table_entry. */
3287
3288 static struct reloc_table_entry *
3289 find_reloc_table_entry (char **str)
3290 {
3291 unsigned int i;
3292 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3293 {
3294 int length = strlen (reloc_table[i].name);
3295
3296 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3297 && (*str)[length] == ':')
3298 {
3299 *str += (length + 1);
3300 return &reloc_table[i];
3301 }
3302 }
3303
3304 return NULL;
3305 }
3306
/* Mode argument to parse_shift and parser_shifter_operand.  Each value
   restricts which shift/extend operators and amounts are accepted.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}" */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm" */
  SHIFTED_LSL,			/* bare "lsl #n" */
  SHIFTED_MUL,			/* bare "mul #n" */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n" */
  SHIFTED_MUL_VL,		/* "mul vl" */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n */
};
3321
3322 /* Parse a <shift> operator on an AArch64 data processing instruction.
3323 Return TRUE on success; otherwise return FALSE. */
3324 static bfd_boolean
3325 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3326 {
3327 const struct aarch64_name_value_pair *shift_op;
3328 enum aarch64_modifier_kind kind;
3329 expressionS exp;
3330 int exp_has_prefix;
3331 char *s = *str;
3332 char *p = s;
3333
3334 for (p = *str; ISALPHA (*p); p++)
3335 ;
3336
3337 if (p == *str)
3338 {
3339 set_syntax_error (_("shift expression expected"));
3340 return FALSE;
3341 }
3342
3343 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3344
3345 if (shift_op == NULL)
3346 {
3347 set_syntax_error (_("shift operator expected"));
3348 return FALSE;
3349 }
3350
3351 kind = aarch64_get_operand_modifier (shift_op);
3352
3353 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3354 {
3355 set_syntax_error (_("invalid use of 'MSL'"));
3356 return FALSE;
3357 }
3358
3359 if (kind == AARCH64_MOD_MUL
3360 && mode != SHIFTED_MUL
3361 && mode != SHIFTED_MUL_VL)
3362 {
3363 set_syntax_error (_("invalid use of 'MUL'"));
3364 return FALSE;
3365 }
3366
3367 switch (mode)
3368 {
3369 case SHIFTED_LOGIC_IMM:
3370 if (aarch64_extend_operator_p (kind))
3371 {
3372 set_syntax_error (_("extending shift is not permitted"));
3373 return FALSE;
3374 }
3375 break;
3376
3377 case SHIFTED_ARITH_IMM:
3378 if (kind == AARCH64_MOD_ROR)
3379 {
3380 set_syntax_error (_("'ROR' shift is not permitted"));
3381 return FALSE;
3382 }
3383 break;
3384
3385 case SHIFTED_LSL:
3386 if (kind != AARCH64_MOD_LSL)
3387 {
3388 set_syntax_error (_("only 'LSL' shift is permitted"));
3389 return FALSE;
3390 }
3391 break;
3392
3393 case SHIFTED_MUL:
3394 if (kind != AARCH64_MOD_MUL)
3395 {
3396 set_syntax_error (_("only 'MUL' is permitted"));
3397 return FALSE;
3398 }
3399 break;
3400
3401 case SHIFTED_MUL_VL:
3402 /* "MUL VL" consists of two separate tokens. Require the first
3403 token to be "MUL" and look for a following "VL". */
3404 if (kind == AARCH64_MOD_MUL)
3405 {
3406 skip_whitespace (p);
3407 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3408 {
3409 p += 2;
3410 kind = AARCH64_MOD_MUL_VL;
3411 break;
3412 }
3413 }
3414 set_syntax_error (_("only 'MUL VL' is permitted"));
3415 return FALSE;
3416
3417 case SHIFTED_REG_OFFSET:
3418 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3419 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3420 {
3421 set_fatal_syntax_error
3422 (_("invalid shift for the register offset addressing mode"));
3423 return FALSE;
3424 }
3425 break;
3426
3427 case SHIFTED_LSL_MSL:
3428 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3429 {
3430 set_syntax_error (_("invalid shift operator"));
3431 return FALSE;
3432 }
3433 break;
3434
3435 default:
3436 abort ();
3437 }
3438
3439 /* Whitespace can appear here if the next thing is a bare digit. */
3440 skip_whitespace (p);
3441
3442 /* Parse shift amount. */
3443 exp_has_prefix = 0;
3444 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3445 exp.X_op = O_absent;
3446 else
3447 {
3448 if (is_immediate_prefix (*p))
3449 {
3450 p++;
3451 exp_has_prefix = 1;
3452 }
3453 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3454 }
3455 if (kind == AARCH64_MOD_MUL_VL)
3456 /* For consistency, give MUL VL the same shift amount as an implicit
3457 MUL #1. */
3458 operand->shifter.amount = 1;
3459 else if (exp.X_op == O_absent)
3460 {
3461 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3462 {
3463 set_syntax_error (_("missing shift amount"));
3464 return FALSE;
3465 }
3466 operand->shifter.amount = 0;
3467 }
3468 else if (exp.X_op == O_big)
3469 {
3470 set_fatal_syntax_error (_("shift amount out of range"));
3471 return FALSE;
3472 }
3473 else if (exp.X_op != O_constant)
3474 {
3475 set_syntax_error (_("constant shift amount required"));
3476 return FALSE;
3477 }
3478 /* For parsing purposes, MUL #n has no inherent range. The range
3479 depends on the operand and will be checked by operand-specific
3480 routines. */
3481 else if (kind != AARCH64_MOD_MUL
3482 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3483 {
3484 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3485 return FALSE;
3486 }
3487 else
3488 {
3489 operand->shifter.amount = exp.X_add_number;
3490 operand->shifter.amount_present = 1;
3491 }
3492
3493 operand->shifter.operator_present = 1;
3494 operand->shifter.kind = kind;
3495
3496 *str = p;
3497 return TRUE;
3498 }
3499
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   The parsed immediate expression is stored in the global inst.reloc.exp;
   any LSL amount is recorded in OPERAND->shifter by parse_shift.

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only arithmetic and logical immediates are handled here.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return FALSE;

  p = *str;

  /* Accept an immediate expression.  */
  if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
    return FALSE;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return FALSE;

  /* Do not accept any shifter for logical immediate values.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return FALSE;
    }

  *str = p;
  return TRUE;
}
3540
3541 /* Parse a <shifter_operand> for a data processing instruction:
3542
3543 <Rm>
3544 <Rm>, <shift>
3545 #<immediate>
3546 #<immediate>, LSL #imm
3547
3548 where <shift> is handled by parse_shift above, and the last two
3549 cases are handled by the function above.
3550
3551 Validation of immediate operands is deferred to md_apply_fix.
3552
3553 Return TRUE on success; otherwise return FALSE. */
3554
3555 static bfd_boolean
3556 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3557 enum parse_shift_mode mode)
3558 {
3559 const reg_entry *reg;
3560 aarch64_opnd_qualifier_t qualifier;
3561 enum aarch64_operand_class opd_class
3562 = aarch64_get_operand_class (operand->type);
3563
3564 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3565 if (reg)
3566 {
3567 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3568 {
3569 set_syntax_error (_("unexpected register in the immediate operand"));
3570 return FALSE;
3571 }
3572
3573 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3574 {
3575 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3576 return FALSE;
3577 }
3578
3579 operand->reg.regno = reg->number;
3580 operand->qualifier = qualifier;
3581
3582 /* Accept optional shift operation on register. */
3583 if (! skip_past_comma (str))
3584 return TRUE;
3585
3586 if (! parse_shift (str, operand, mode))
3587 return FALSE;
3588
3589 return TRUE;
3590 }
3591 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3592 {
3593 set_syntax_error
3594 (_("integer register expected in the extended/shifted operand "
3595 "register"));
3596 return FALSE;
3597 }
3598
3599 /* We have a shifted immediate variable. */
3600 return parse_shifter_operand_imm (str, operand, mode);
3601 }
3602
/* Parse a shifter operand that may be prefixed by a relocation modifier
   of the form "#:relop:" or ":relop:".  If no such modifier is present,
   everything is delegated to parse_shifter_operand.

   On success the relocation type (the ADD variant of the table entry) and
   pc-relative flag are recorded in the global inst.reloc.

   Return TRUE on success; return FALSE otherwise.  */

static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip the '#:' or ':' prefix before the modifier name.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      /* The modifier must have an ADD-class relocation defined.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}
      /* The Morello TLSIE ADD relocation is only meaningful when
	 assembling C64 code.  */
      if (entry->add_type == BFD_RELOC_MORELLO_TLSIE_ADD_LO12 && !IS_C64)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed in non-C64 mode"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3669
3670 /* Parse all forms of an address expression. Information is written
3671 to *OPERAND and/or inst.reloc.
3672
3673 The A64 instruction set has the following addressing modes:
3674
3675 Offset
3676 [base] // in SIMD ld/st structure
3677 [base{,#0}] // in ld/st exclusive
3678 [base{,#imm}]
3679 [base,Xm{,LSL #imm}]
3680 [base,Xm,SXTX {#imm}]
3681 [base,Wm,(S|U)XTW {#imm}]
3682 Pre-indexed
3683 [base]! // in ldraa/ldrab exclusive
3684 [base,#imm]!
3685 Post-indexed
3686 [base],#imm
3687 [base],Xm // in SIMD ld/st structure
3688 PC-relative (literal)
3689 label
3690 SVE:
3691 [base,#imm,MUL VL]
3692 [base,Zm.D{,LSL #imm}]
3693 [base,Zm.S,(S|U)XTW {#imm}]
3694 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3695 [Zn.S,#imm]
3696 [Zn.D,#imm]
3697 [Zn.S{, Xm}]
3698 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3699 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3700 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3701
3702 (As a convenience, the notation "=immediate" is permitted in conjunction
3703 with the pc-relative literal load instructions to automatically place an
3704 immediate value or symbolic address in a nearby literal pool and generate
3705 a hidden label which references it.)
3706
3707 Upon a successful parsing, the address structure in *OPERAND will be
3708 filled in the following way:
3709
3710 .base_regno = <base>
3711 .offset.is_reg // 1 if the offset is a register
3712 .offset.imm = <imm>
3713 .offset.regno = <Rm>
3714
3715 For different addressing modes defined in the A64 ISA:
3716
3717 Offset
3718 .pcrel=0; .preind=1; .postind=0; .writeback=0
3719 Pre-indexed
3720 .pcrel=0; .preind=1; .postind=0; .writeback=1
3721 Post-indexed
3722 .pcrel=0; .preind=0; .postind=1; .writeback=1
3723 PC-relative (literal)
3724 .pcrel=1; .preind=1; .postind=0; .writeback=0
3725
3726 The shift/extension information, if any, will be stored in .shifter.
3727 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3728 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3729 corresponding register.
3730
3731 BASE_TYPE says which types of base register should be accepted and
3732 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3733 is the type of shifter that is allowed for immediate offsets,
3734 or SHIFTED_NONE if none.
3735
3736 In all other respects, it is the caller's responsibility to check
3737 for addressing modes not supported by the instruction, and to set
3738 inst.reloc.type. */
3739
static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  /* Immediate offsets and relocation expressions are accumulated in the
     global inst.reloc.  */
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is the =immediate or label (pc-relative) form.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  /* Pick the relocation variant appropriate for the operand:
	     ADR uses the adr_type, everything else here is a load
	     literal.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  /* A '[' form always begins with a base register of BASE_TYPE.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return FALSE;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}] */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shift kinds require a 64-bit offset register...  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	      /* ...and, except for the SVE2 vector-plus-scalar and the
		 capability-base cases, an offset of the same size as the
		 base.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X)
		  /* Capabilities can have W as well as X registers as
		     offsets.  */
		  && (*base_qualifier != AARCH64_OPND_QLF_CA))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return FALSE;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW extends require a 32-bit offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      /* Plain immediate offset (with optional '#').  */
	      if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return FALSE;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return FALSE;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  if (skip_past_char (&p, '!'))
    {
      /* Writeback with a register offset is not a valid addressing mode.  */
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      /* Post-index offset: either a 64-bit register or an expression.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return FALSE;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return FALSE;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is shorthand for [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return TRUE;
}
4043
4044 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4045 on success. */
4046 static bfd_boolean
4047 parse_address (char **str, aarch64_opnd_info *operand)
4048 {
4049 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4050
4051 aarch64_reg_type base;
4052
4053 if (AARCH64_CPU_HAS_FEATURE (cpu_variant, AARCH64_FEATURE_C64))
4054 base = REG_TYPE_CA_N_SP;
4055 else
4056 base = REG_TYPE_R64_SP;
4057
4058 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4059 base, REG_TYPE_R_Z, SHIFTED_NONE);
4060 }
4061
4062 /* Parse a base capability address. Return TRUE on success. */
4063 static bfd_boolean
4064 parse_cap_address (char **str, aarch64_opnd_info *operand,
4065 enum aarch64_insn_class class)
4066 {
4067 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4068 aarch64_reg_type base;
4069
4070 if (AARCH64_CPU_HAS_FEATURE (cpu_variant, AARCH64_FEATURE_C64)
4071 && class != br_capaddr)
4072 base = REG_TYPE_R64_SP;
4073 else
4074 base = REG_TYPE_CA_N_SP;
4075
4076 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4077 base, REG_TYPE_R_Z, SHIFTED_NONE);
4078 }
4079
/* Parse an address in which SVE vector registers and MUL VL are allowed.
   The arguments have the same meaning as for parse_address_main.
   Return TRUE on success.  */
static bfd_boolean
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  /* Thin wrapper: fix the base/offset register classes to the SVE ones
     and permit a MUL VL shifter on immediate offsets.  */
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
4092
/* Parse an operand for a MOVZ, MOVN or MOVK instruction.

   The parsed expression is stored in the global inst.reloc.exp, and any
   explicit ":relop:" modifier sets inst.reloc.type.  *INTERNAL_FIXUP_P is
   set to 1 when no modifier was given and the fixup must be resolved
   internally.

   Return TRUE on success; otherwise return FALSE.  */
static bfd_boolean
parse_half (char **str, int *internal_fixup_p)
{
  char *p = *str;

  skip_past_char (&p, '#');

  gas_assert (internal_fixup_p);
  *internal_fixup_p = 0;

  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      /* The modifier must have a MOVW-class relocation defined.  */
      if (entry->movw_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      inst.reloc.type = entry->movw_type;
    }
  else
    *internal_fixup_p = 1;

  if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
    return FALSE;

  /* The Morello MOVW_SIZE_G* relocations require a symbol operand and are
     only valid when assembling C64 code.  */
  bfd_boolean is_morello_size_reloc
    = (inst.reloc.type == BFD_RELOC_MORELLO_MOVW_SIZE_G0
       || inst.reloc.type == BFD_RELOC_MORELLO_MOVW_SIZE_G0_NC
       || inst.reloc.type == BFD_RELOC_MORELLO_MOVW_SIZE_G1
       || inst.reloc.type == BFD_RELOC_MORELLO_MOVW_SIZE_G1_NC
       || inst.reloc.type == BFD_RELOC_MORELLO_MOVW_SIZE_G2
       || inst.reloc.type == BFD_RELOC_MORELLO_MOVW_SIZE_G2_NC
       || inst.reloc.type == BFD_RELOC_MORELLO_MOVW_SIZE_G3);
  if (inst.reloc.exp.X_add_symbol == 0 && is_morello_size_reloc)
    {
      set_syntax_error
	(_("size relocation is not allowed on non-symbol expression"));
      return FALSE;
    }
  if (is_morello_size_reloc && !IS_C64)
    {
      set_syntax_error (_("size relocation is not allowed in non-C64 mode"));
      return FALSE;
    }

  *str = p;
  return TRUE;
}
4155
/* Parse an operand for an ADRP instruction:
     ADRP <Xd>, <label>

   The label expression and relocation type are recorded in the global
   inst.reloc; C64 mode selects the Morello relocation variants.

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_adrp (char **str)
{
  char *p;

  p = *str;
  if (*p == ':')
    {
      struct reloc_table_entry *entry;
      bfd_reloc_code_real_type adrp_type;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      /* In C64 mode, use the C64 variant of the ADRP relocation.  */
      adrp_type = (AARCH64_CPU_HAS_FEATURE (cpu_variant, AARCH64_FEATURE_C64)
		   ? entry->c64_adrp_type : entry->adrp_type);

      if (adrp_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      inst.reloc.type = adrp_type;
    }
  else
    /* No explicit modifier: default page relocation (Morello's 20-bit
       immediate in C64 mode, AArch64's 21-bit one otherwise).  */
    inst.reloc.type = (AARCH64_CPU_HAS_FEATURE (cpu_variant,
						AARCH64_FEATURE_C64)
		       ? BFD_RELOC_MORELLO_ADR_HI20_PCREL
		       : BFD_RELOC_AARCH64_ADR_HI21_PCREL);

  inst.reloc.pc_rel = 1;

  if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
    return FALSE;

  *str = p;
  return TRUE;
}
4205
4206 /* Miscellaneous. */
4207
4208 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4209 of SIZE tokens in which index I gives the token for field value I,
4210 or is null if field value I is invalid. REG_TYPE says which register
4211 names should be treated as registers rather than as symbolic immediates.
4212
4213 Return true on success, moving *STR past the operand and storing the
4214 field value in *VAL. */
4215
4216 static int
4217 parse_enum_string (char **str, int64_t *val, const char *const *array,
4218 size_t size, aarch64_reg_type reg_type)
4219 {
4220 expressionS exp;
4221 char *p, *q;
4222 size_t i;
4223
4224 /* Match C-like tokens. */
4225 p = q = *str;
4226 while (ISALNUM (*q))
4227 q++;
4228
4229 for (i = 0; i < size; ++i)
4230 if (array[i]
4231 && strncasecmp (array[i], p, q - p) == 0
4232 && array[i][q - p] == 0)
4233 {
4234 *val = i;
4235 *str = q;
4236 return TRUE;
4237 }
4238
4239 if (!parse_immediate_expression (&p, &exp, reg_type))
4240 return FALSE;
4241
4242 if (exp.X_op == O_constant
4243 && (uint64_t) exp.X_add_number < size)
4244 {
4245 *val = exp.X_add_number;
4246 *str = p;
4247 return TRUE;
4248 }
4249
4250 /* Use the default error for this operand. */
4251 return FALSE;
4252 }
4253
4254 /* Parse an option for a preload instruction. Returns the encoding for the
4255 option, or PARSE_FAIL. */
4256
4257 static int
4258 parse_pldop (char **str)
4259 {
4260 char *p, *q;
4261 const struct aarch64_name_value_pair *o;
4262
4263 p = q = *str;
4264 while (ISALNUM (*q))
4265 q++;
4266
4267 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4268 if (!o)
4269 return PARSE_FAIL;
4270
4271 *str = q;
4272 return o->value;
4273 }
4274
4275 /* Parse an option for a barrier instruction. Returns the encoding for the
4276 option, or PARSE_FAIL. */
4277
4278 static int
4279 parse_barrier (char **str)
4280 {
4281 char *p, *q;
4282 const struct aarch64_name_value_pair *o;
4283
4284 p = q = *str;
4285 while (ISALPHA (*q))
4286 q++;
4287
4288 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4289 if (!o)
4290 return PARSE_FAIL;
4291
4292 *str = q;
4293 return o->value;
4294 }
4295
4296 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4297 return 0 if successful. Otherwise return PARSE_FAIL. */
4298
4299 static int
4300 parse_barrier_psb (char **str,
4301 const struct aarch64_name_value_pair ** hint_opt)
4302 {
4303 char *p, *q;
4304 const struct aarch64_name_value_pair *o;
4305
4306 p = q = *str;
4307 while (ISALPHA (*q))
4308 q++;
4309
4310 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4311 if (!o)
4312 {
4313 set_fatal_syntax_error
4314 ( _("unknown or missing option to PSB/TSB"));
4315 return PARSE_FAIL;
4316 }
4317
4318 if (o->value != 0x11)
4319 {
4320 /* PSB only accepts option name 'CSYNC'. */
4321 set_syntax_error
4322 (_("the specified option is not accepted for PSB/TSB"));
4323 return PARSE_FAIL;
4324 }
4325
4326 *str = q;
4327 *hint_opt = o;
4328 return 0;
4329 }
4330
4331 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4332 return 0 if successful. Otherwise return PARSE_FAIL. */
4333
4334 static int
4335 parse_bti_operand (char **str,
4336 const struct aarch64_name_value_pair ** hint_opt)
4337 {
4338 char *p, *q;
4339 const struct aarch64_name_value_pair *o;
4340
4341 p = q = *str;
4342 while (ISALPHA (*q))
4343 q++;
4344
4345 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4346 if (!o)
4347 {
4348 set_fatal_syntax_error
4349 ( _("unknown option to BTI"));
4350 return PARSE_FAIL;
4351 }
4352
4353 switch (o->value)
4354 {
4355 /* Valid BTI operands. */
4356 case HINT_OPD_C:
4357 case HINT_OPD_J:
4358 case HINT_OPD_JC:
4359 break;
4360
4361 default:
4362 set_syntax_error
4363 (_("unknown option to BTI"));
4364 return PARSE_FAIL;
4365 }
4366
4367 *str = q;
4368 *hint_opt = o;
4369 return 0;
4370 }
4371
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-null, *FLAGS receives the matched register's flag bits
   (0 for the implementation-defined form).
*/

static int
parse_sys_reg (const aarch64_opcode *opcode, char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, bounded by the
     buffer size.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the 16-bit encoding.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Reject registers not accessible from this (capability) instruction
	 class.  */
      if (!aarch64_sys_reg_capreg_supported_p (opcode->iclass, o))
	return PARSE_FAIL;

      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4448
/* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
   for the option, or NULL.  Unsupported or deprecated names still return the
   entry but emit a diagnostic first.  */

static const aarch64_sys_ins_reg *
parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_ins_reg *o;

  /* Copy a lower-cased version of the name into BUF, bounded by the
     buffer size.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return NULL;

  o = str_hash_find (sys_ins_regs, buf);
  if (!o)
    return NULL;

  if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
					o->name, o->value, o->flags, 0))
    as_bad (_("selected processor does not support system register "
	      "name '%s'"), buf);
  if (aarch64_sys_reg_deprecated_p (o->flags))
    as_warn (_("system register name '%s' is deprecated and may be "
	       "removed in a future release"), buf);

  *str = q;
  return o;
}
4486 \f
/* Convenience macros for the operand-parsing code below.  Each po_*_or_fail
   macro parses one syntactic element and, on failure, jumps to the enclosing
   function's `failure' label; they rely on locals named str, val, reg,
   rtype, qualifier, info and imm_reg_type being in scope.  */

/* Consume the single character CHR, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of type REGTYPE into VAL, or fail.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE, storing its number
   and qualifier into INFO, or fail.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and fail unless MIN <= VAL <= MAX.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic token from ARRAY into VAL, or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; fail when it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4538 \f
/* Encode the 12-bit immediate field of an add/sub immediate instruction;
   the value is placed at bit position 10.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  uint32_t field = imm;
  field <<= 10;
  return field;
}
4545
/* Encode the shift-amount field of an add/sub immediate instruction;
   the value is placed at bit position 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  uint32_t field = cnt;
  field <<= 22;
  return field;
}
4552
4553
/* Encode the immediate field of an ADR instruction: bits [1:0] of the
   immediate go to [30:29] and bits [20:2] go to [23:5].  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t lo = imm & 0x3u;		/* [1:0]  */
  uint32_t hi = (imm >> 2) & 0x7ffffu;	/* [20:2] */

  return (lo << 29) | (hi << 5);
}
4561
/* Encode the immediate field of a move-wide immediate instruction;
   the value is placed at bit position 5.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  uint32_t field = imm;
  field <<= 5;
  return field;
}
4568
/* Encode the 26-bit offset of an unconditional branch: keep the low
   26 bits at bit position 0.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t mask = (1u << 26) - 1;

  return ofs & mask;
}
4575
/* Encode the 19-bit offset of a conditional branch or compare & branch:
   the low 19 bits are placed at bit position 5.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t mask = (1u << 19) - 1;

  return (ofs & mask) << 5;
}
4582
/* Encode the 17-bit offset of a load literal: the low 17 bits are placed
   at bit position 5.  */
static inline uint32_t
encode_ld_lit_ofs_17 (uint32_t ofs)
{
  const uint32_t mask = (1u << 17) - 1;

  return (ofs & mask) << 5;
}
4589
/* Encode the 19-bit offset of a load literal: the low 19 bits are placed
   at bit position 5.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t mask = (1u << 19) - 1;

  return (ofs & mask) << 5;
}
4596
/* Encode the 14-bit offset of test & branch: the low 14 bits are placed
   at bit position 5.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t mask = (1u << 14) - 1;

  return (ofs & mask) << 5;
}
4603
/* Encode the 16-bit immediate field of svc/hvc/smc; the value is placed
   at bit position 5.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  uint32_t field = imm;
  field <<= 5;
  return field;
}
4610
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling bit 30 of
   the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;

  return opcode ^ op_bit;
}
4617
/* Reencode a MOVZ/MOVN-class opcode as MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;

  return opcode | movz_bit;
}
4623
/* Reencode a MOVZ/MOVN-class opcode as MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;

  return opcode & ~movz_bit;
}
4629
4630 /* Overall per-instruction processing. */
4631
4632 /* We need to be able to fix up arbitrary expressions in some statements.
4633 This is so that we can handle symbols that are an arbitrary distance from
4634 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4635 which returns part of an address in a form which will be valid for
4636 a data instruction. We do this by pushing the expression into a symbol
4637 in the expr_section, and creating a fix for that. */
4638
4639 static fixS *
4640 fix_new_aarch64 (fragS * frag,
4641 int where,
4642 short int size,
4643 expressionS * exp,
4644 int pc_rel,
4645 int reloc)
4646 {
4647 fixS *new_fix;
4648
4649 switch (exp->X_op)
4650 {
4651 case O_constant:
4652 case O_symbol:
4653 case O_add:
4654 case O_subtract:
4655 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4656 break;
4657
4658 default:
4659 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4660 pc_rel, reloc);
4661 break;
4662 }
4663 return new_fix;
4664 }
4665 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error messages for operand mismatches.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;
4671
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Indexed by the
   operand-error kind; the order must mirror enum
   aarch64_operand_error_kind -- NOTE(review): confirm against the
   opcode header when new kinds are added.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4687
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* Severity is simply the enum's numeric order; the asserts verify (in
     debug builds) that the ordering assumption still holds.
     NOTE(review): AARCH64_OPDE_UNTIED_OPERAND is not covered by these
     asserts -- confirm its position if the enum is ever reordered.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4708
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  /* Static buffer: the returned pointer is only valid until the next
     call, and this routine is not reentrant.  */
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  /* Terminate at the first non-name character (whitespace, '.', ...).  */
  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4737
4738 static void
4739 reset_aarch64_instruction (aarch64_instruction *instruction)
4740 {
4741 memset (instruction, '\0', sizeof (aarch64_instruction));
4742 instruction->reloc.type = BFD_RELOC_UNUSED;
4743 }
4744
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded error: the opcode template it was found against, the
   error detail itself, and a link to the next record in the report.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;
  aarch64_operand_error detail;
  struct operand_error_record *next;
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of error records; TAIL allows the whole list to be
   recycled in O(1) (see init_operand_error_report).  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled from earlier reports to avoid repeated
   heap allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4776
4777 /* Initialize the data structure that stores the operand mismatch
4778 information on assembling one line of the assembly code. */
4779 static void
4780 init_operand_error_report (void)
4781 {
4782 if (operand_error_report.head != NULL)
4783 {
4784 gas_assert (operand_error_report.tail != NULL);
4785 operand_error_report.tail->next = free_opnd_error_record_nodes;
4786 free_opnd_error_record_nodes = operand_error_report.head;
4787 operand_error_report.head = NULL;
4788 operand_error_report.tail = NULL;
4789 return;
4790 }
4791 gas_assert (operand_error_report.tail == NULL);
4792 }
4793
4794 /* Return TRUE if some operand error has been recorded during the
4795 parsing of the current assembly line using the opcode *OPCODE;
4796 otherwise return FALSE. */
4797 static inline bfd_boolean
4798 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4799 {
4800 operand_error_record *record = operand_error_report.head;
4801 return record && record->opcode == opcode;
4802 }
4803
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* RECORD initially aliases the head of the report; if the head is
     already the record for OPCODE it stays that way, otherwise it is
     reassigned to a freshly obtained node below.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Reuse a node from the free list instead of allocating.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Reached on both the fresh-record path and when the existing error is
     to be replaced; in either case RECORD is the record for OPCODE.  */
  record->detail = new_record->detail;
}
4855
4856 static inline void
4857 record_operand_error_info (const aarch64_opcode *opcode,
4858 aarch64_operand_error *error_info)
4859 {
4860 operand_error_record record;
4861 record.opcode = opcode;
4862 record.detail = *error_info;
4863 add_operand_error_record (&record);
4864 }
4865
4866 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4867 error message *ERROR, for operand IDX (count from 0). */
4868
4869 static void
4870 record_operand_error (const aarch64_opcode *opcode, int idx,
4871 enum aarch64_operand_error_kind kind,
4872 const char* error)
4873 {
4874 aarch64_operand_error info;
4875 memset(&info, 0, sizeof (info));
4876 info.index = idx;
4877 info.kind = kind;
4878 info.error = error;
4879 info.non_fatal = FALSE;
4880 record_operand_error_info (opcode, &info);
4881 }
4882
4883 static void
4884 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4885 enum aarch64_operand_error_kind kind,
4886 const char* error, const int *extra_data)
4887 {
4888 aarch64_operand_error info;
4889 info.index = idx;
4890 info.kind = kind;
4891 info.error = error;
4892 info.data[0] = extra_data[0];
4893 info.data[1] = extra_data[1];
4894 info.data[2] = extra_data[2];
4895 info.non_fatal = FALSE;
4896 record_operand_error_info (opcode, &info);
4897 }
4898
4899 static void
4900 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4901 const char* error, int lower_bound,
4902 int upper_bound)
4903 {
4904 int data[3] = {lower_bound, upper_bound, 0};
4905 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4906 error, data);
4907 }
4908
4909 /* Remove the operand error record for *OPCODE. */
4910 static void ATTRIBUTE_UNUSED
4911 remove_operand_error_record (const aarch64_opcode *opcode)
4912 {
4913 if (opcode_has_operand_error_p (opcode))
4914 {
4915 operand_error_record* record = operand_error_report.head;
4916 gas_assert (record != NULL && operand_error_report.tail != NULL);
4917 operand_error_report.head = record->next;
4918 record->next = free_opnd_error_record_nodes;
4919 free_opnd_error_record_nodes = record;
4920 if (operand_error_report.head == NULL)
4921 {
4922 gas_assert (operand_error_report.tail == record);
4923 operand_error_report.tail = NULL;
4924 }
4925 }
4926 }
4927
4928 /* Given the instruction in *INSTR, return the index of the best matched
4929 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4930
4931 Return -1 if there is no qualifier sequence; return the first match
4932 if there is multiple matches found. */
4933
4934 static int
4935 find_best_match (const aarch64_inst *instr,
4936 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4937 {
4938 int i, num_opnds, max_num_matched, idx;
4939
4940 num_opnds = aarch64_num_of_operands (instr->opcode);
4941 if (num_opnds == 0)
4942 {
4943 DEBUG_TRACE ("no operand");
4944 return -1;
4945 }
4946
4947 max_num_matched = 0;
4948 idx = 0;
4949
4950 /* For each pattern. */
4951 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4952 {
4953 int j, num_matched;
4954 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4955
4956 /* Most opcodes has much fewer patterns in the list. */
4957 if (empty_qualifier_sequence_p (qualifiers))
4958 {
4959 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4960 break;
4961 }
4962
4963 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4964 if (*qualifiers == instr->operands[j].qualifier)
4965 ++num_matched;
4966
4967 if (num_matched > max_num_matched)
4968 {
4969 max_num_matched = num_matched;
4970 idx = i;
4971 }
4972 }
4973
4974 DEBUG_TRACE ("return with %d", idx);
4975 return idx;
4976 }
4977
4978 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4979 corresponding operands in *INSTR. */
4980
4981 static inline void
4982 assign_qualifier_sequence (aarch64_inst *instr,
4983 const aarch64_opnd_qualifier_t *qualifiers)
4984 {
4985 int i = 0;
4986 int num_opnds = aarch64_num_of_operands (instr->opcode);
4987 gas_assert (num_opnds);
4988 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4989 instr->operands[i].qualifier = *qualifiers;
4990 }
4991
/* Print operands for the diagnosis purpose.  Each operand's textual form
   is appended to BUF, separated by ", " (the first preceded by a space).
   NOTE(review): BUF is grown with unchecked strcat; callers must supply a
   buffer large enough for all operands (current callers use 2048 bytes)
   -- confirm if new call sites are added.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cpu_variant);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);
    }
}
5025
/* Send to stderr a string as information, prefixed with the current
   file name and line number (when known), followed by "Info: " and the
   printf-style FORMAT/... message.  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  file = as_where (&line);
  if (file)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  /* Print the translated prefix with fputs rather than using it as a
     printf format string, so that a '%' in a translation cannot be
     misinterpreted as a conversion specification (undefined behavior).  */
  fputs (_("Info: "), stderr);
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
5049
/* Output one operand error record.  RECORD carries the error detail and
   the opcode template it was found against; STR is the user's original
   assembly line, quoted in every message.  Non-fatal details are issued
   via as_warn, fatal ones via as_bad.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX may be -1 (unknown operand); only index into the operand table
     when it is a real operand position.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parsing must succeed, and encoding must fail,
	     because this error kind means only the qualifiers mismatched.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (cpu_variant, opcode, inst_base,
					  &inst_base->value, NULL, NULL,
					  insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the permitted bounds (see
	 record_operand_out_of_range_error).  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5227
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bfd_boolean non_fatal_only)
{
  int largest_error_pos, largest_error_pos2;
  const char *msg = NULL, *msg2 = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  /* Candidates are tracked in two tiers: RECORD for templates whose
     feature set is enabled in cpu_variant, RECORD2 for the rest.  */
  operand_error_record *record = NULL, *record2 = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  largest_error_pos2 = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (AARCH64_CPU_HAS_FEATURE (cpu_variant, *curr->opcode->avariant))
	{
	  if (curr->detail.index > largest_error_pos
	      || (curr->detail.index == largest_error_pos && msg == NULL
		  && curr->detail.error != NULL))
	    {
	      largest_error_pos = curr->detail.index;
	      record = curr;
	      msg = record->detail.error;
	    }
	}
      else
	{
	  if (curr->detail.index > largest_error_pos2
	      || (curr->detail.index == largest_error_pos2 && msg2 == NULL
		  && curr->detail.error != NULL))
	    {
	      largest_error_pos2 = curr->detail.index;
	      record2 = curr;
	      msg2 = record2->detail.error;
	    }
	}
    }

  /* No errors in enabled cpu feature variants, look for errors in the disabled
     ones.  XXX we should do this segregation when prioritizing too.  */
  if (!record)
    {
      largest_error_pos = largest_error_pos2;
      record = record2;
      msg = msg2;
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5349 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *where = (unsigned char *) buf;
  int i;

  /* Emit the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    where[i] = (insn >> (8 * i)) & 0xff;
}
5360
/* Read a 32-bit AArch64 instruction word from BUF, assembling the four
   bytes little-endian.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *where = (unsigned char *) buf;
  uint32_t result = 0;
  int i;

  for (i = 3; i >= 0; i--)
    result = (result << 8) | where[i];
  return result;
}
5370
/* Emit the instruction currently held in the global `inst' into the
   output frag, and create any fixup its relocation requires.  NEW_INST,
   when non-NULL, is attached to the fixup so md_apply_fix can re-encode
   the instruction later.  */
static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room for one instruction in the current frag.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand identity and flags so the
	     backend can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
      /* Remember whether this fixup was produced under C64 so relocation
	 processing can account for the capability ISA.  */
      if (inst.reloc.flags & FIXUP_F_C64)
	fixp->tc_fix_data.c64 = TRUE;
    }

  dwarf2_emit_insn (INSN_SIZE);
}
5406
/* Link together opcodes of the same name.  */

/* One node in the per-mnemonic chain of opcode templates; the chain is
   looked up by mnemonic in aarch64_ops_hsh (see lookup_mnemonic).  */
struct templates
{
  aarch64_opcode *opcode;
  struct templates *next;
};

typedef struct templates templates;
5416
5417 static templates *
5418 lookup_mnemonic (const char *start, int len)
5419 {
5420 templates *templ = NULL;
5421
5422 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5423 return templ;
5424 }
5425
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.  On success *STR is advanced past the
   mnemonic (and any recognized condition suffix); inst.cond is set as
   a side effect.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  Only the first '.' is remembered, as the
     potential start of a condition suffix.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Empty mnemonic, or one that starts with '.', is not valid.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown suffix: leave *STR at the '.' so the caller can
	     report a sensible error position.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* len <= 13 keeps len + 2 within condname[16]; no NUL terminator
	 is needed since lookup_mnemonic is length-bounded.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5489
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL
   (after recording an error via first_error) for an invalid arrangement.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type, from which the full-register
     qualifier is derived by an offset computed below.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate suffixes /z and /m map to their own qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 4-, 8- and 16-byte total vector sizes are representable.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5564
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   The default value encoded in the opcode table is stored into whichever
   member of *OPERAND is appropriate for the operand class.  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Register operands: the default value is a register number.  */
    case AARCH64_OPND_Can:
    case AARCH64_OPND_Cat_SYS:
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element (register lane) operands.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default value is the immediate itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      /* An omitted scaled pattern defaults to "MUL #1".  */
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      /* No immediate was parsed, so make sure no relocation is pending.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* The default value indexes the barrier-option table.  */
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      /* The default value indexes the hint-option table.  */
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5664
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE. */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* A W qualifier on operand 0 (the destination register) selects the
     32-bit variant of the instruction.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* These relocation types are rejected for MOVK: the signed (_S),
     PREL and TPREL group relocations listed below are only accepted on
     the other move wide instructions.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  /* Derive the implicit shift amount from the relocation group:
     G0 -> 0, G1 -> 16, G2 -> 32, G3 -> 48.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_MORELLO_MOVW_SIZE_G0:
    case BFD_RELOC_MORELLO_MOVW_SIZE_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_MORELLO_MOVW_SIZE_G1:
    case BFD_RELOC_MORELLO_MOVW_SIZE_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_MORELLO_MOVW_SIZE_G2:
    case BFD_RELOC_MORELLO_MOVW_SIZE_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* G2/G3 groups select bits above bit 31, so they cannot apply to
	 a 32-bit destination register.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_MORELLO_MOVW_SIZE_G3:
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS. */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser. */
      return TRUE;
    }
  /* Operand 1 is the immediate; record the implicit LSL amount there.  */
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5773
/* A primitive log calculator: return log2 of SIZE for the power-of-two
   operand sizes 1, 2, 4, 8 and 16 bytes.  Entries of -1 in the lookup
   table mark sizes that are not exact powers of two; reaching one of
   them (or passing a size outside [1, 16]) is a caller bug and trips
   an assertion.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as SIZE > 16: indexing ls[size - 1] with
     SIZE == 0 would read before the start of the array (undefined
     behavior), so catch it here before the table access.  */
  if (size < 1 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5789
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows correspond to the pseudo relocation types (LDST_LO12, then the
     TLS DTPREL/TPREL variants, in the order they are defined in reloc.c);
     columns are indexed by log2 of the access size in bytes (8-, 16-,
     32-, 64- and 128-bit accesses).  The TLS rows have no 128-bit form,
     hence BFD_RELOC_AARCH64_NONE in their last column.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  /* Only the five pseudo relocation types covered by the table rows are
     acceptable here.  */
  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12
	      || inst.base.opcode->operands[1] == AARCH64_OPND_CAPADDR_UIMM9);

  /* If the address operand carries no explicit qualifier, infer one from
     operand 0's qualifier via the opcode's qualifier list.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  /* LOGSZ selects the table column; the TLS rows only go up to 64-bit
     accesses (see the BFD_RELOC_AARCH64_NONE entries above).  */
  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this. */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5869
5870 /* Check whether a register list REGINFO is valid. The registers must be
5871 numbered in increasing order (modulo 32), in increments of one or two.
5872
5873 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5874 increments of two.
5875
5876 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5877
5878 static bfd_boolean
5879 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5880 {
5881 uint32_t i, nb_regs, prev_regno, incr;
5882
5883 nb_regs = 1 + (reginfo & 0x3);
5884 reginfo >>= 2;
5885 prev_regno = reginfo & 0x1f;
5886 incr = accept_alternate ? 2 : 1;
5887
5888 for (i = 1; i < nb_regs; ++i)
5889 {
5890 uint32_t curr_regno;
5891 reginfo >>= 5;
5892 curr_regno = reginfo & 0x1f;
5893 if (curr_regno != ((prev_regno + incr) & 0x1f))
5894 return FALSE;
5895 prev_regno = curr_regno;
5896 }
5897
5898 return TRUE;
5899 }
5900
/* Parse a capability permission operand at *STR into INFO->perm.  Two
   forms are accepted: a single digit 0-7 (optionally prefixed with '#'),
   or a mnemonic string built from the letters r, w and x given in that
   order.  On success advance *STR past the operand and return TRUE;
   otherwise record a syntax error and return FALSE.  */

static bfd_boolean
parse_perms (char **str, aarch64_opnd_info *info)
{
  char *p = *str;
  char c;
  aarch64_insn perms = 0;

  /* Numeric value of permissions. */
  /* The '&& p++' skips an optional '#' prefix; the increment only
     happens when the prefix is present.  */
  if (ISDIGIT (*p) || (*p == '#' && p++))
    {
      perms = *p - '0';
      /* Exactly one digit is accepted (the next character must be the
	 NUL terminator) and its value must fit in three bits.  */
      if (p[1] > 0 || perms > 7)
	{
	  set_syntax_error (_("invalid permission value"));
	  return FALSE;
	}
      /* Step past the digit and the terminator so the shared exit below
	 (*str = p - 1) leaves *STR on the NUL, matching the loop path.  */
      p += 2;
      goto out;
    }

  /* Permission specifier mnemonics r, w and x, in that order. Do not accept
     jumbled up sequences such as rxw, wrx, etc. and also reject duplicate
     permissions such as rrxw. */
  while ((c = *p++) != '\0')
    {
      aarch64_insn i = get_perm_bit (c);
      /* Reject unknown characters (i > 7 — presumably get_perm_bit's
	 failure value; TODO confirm), duplicates (i & perms), and
	 out-of-order sequences: (i - 1) & perms is nonzero when a bit
	 below I — i.e. a letter that must come later — was already
	 seen.  */
      if (i > 7 || i & perms || (i - 1) & perms)
	{
	  set_syntax_error (_("invalid permissions"));
	  return FALSE;
	}
      perms |= i;
    }

 out:
  /* P is one past the terminating NUL on both paths; leave *STR on the
     NUL itself.  */
  *str = p - 1;
  info->perm = perms;
  return TRUE;
}
5940
5941 /* Generic instruction operand parser. This does no encoding and no
5942 semantic validation; it merely squirrels values away in the inst
5943 structure. Returns TRUE or FALSE depending on whether the
5944 specified grammar matched. */
5945
5946 static bfd_boolean
5947 parse_operands (char *str, const aarch64_opcode *opcode)
5948 {
5949 int i;
5950 char *backtrack_pos = 0;
5951 const enum aarch64_opnd *operands = opcode->operands;
5952 aarch64_reg_type imm_reg_type;
5953
5954 clear_error ();
5955 skip_whitespace (str);
5956
5957 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5958 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5959 else
5960 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5961
5962 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5963 {
5964 int64_t val;
5965 const reg_entry *reg;
5966 int comma_skipped_p = 0;
5967 aarch64_reg_type rtype;
5968 struct vector_type_el vectype;
5969 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5970 aarch64_opnd_info *info = &inst.base.operands[i];
5971 aarch64_reg_type reg_type;
5972
5973 DEBUG_TRACE ("parse operand %d", i);
5974
5975 /* Assign the operand code. */
5976 info->type = operands[i];
5977
5978 if (optional_operand_p (opcode, i))
5979 {
5980 /* Remember where we are in case we need to backtrack. */
5981 gas_assert (!backtrack_pos);
5982 backtrack_pos = str;
5983 }
5984
5985 /* Expect comma between operands; the backtrack mechanism will take
5986 care of cases of omitted optional operand. */
5987 if (i > 0 && ! skip_past_char (&str, ','))
5988 {
5989 set_syntax_error (_("comma expected between operands"));
5990 goto failure;
5991 }
5992 else
5993 comma_skipped_p = 1;
5994
5995 switch (operands[i])
5996 {
5997 case AARCH64_OPND_Rsz:
5998 case AARCH64_OPND_Rsz2:
5999 case AARCH64_OPND_Rd:
6000 case AARCH64_OPND_Rn:
6001 case AARCH64_OPND_Rm:
6002 case AARCH64_OPND_Rt:
6003 case AARCH64_OPND_Rt2:
6004 case AARCH64_OPND_Rs:
6005 case AARCH64_OPND_Ra:
6006 case AARCH64_OPND_Rt_SYS:
6007 case AARCH64_OPND_PAIRREG:
6008 case AARCH64_OPND_SVE_Rm:
6009 po_int_reg_or_fail (REG_TYPE_R_Z);
6010 break;
6011
6012 case AARCH64_OPND_Rd_SP:
6013 case AARCH64_OPND_Rn_SP:
6014 case AARCH64_OPND_Rt_SP:
6015 case AARCH64_OPND_SVE_Rn_SP:
6016 case AARCH64_OPND_Rm_SP:
6017 po_int_reg_or_fail (REG_TYPE_R_SP);
6018 break;
6019
6020 case AARCH64_OPND_A64C_Rm_EXT:
6021 case AARCH64_OPND_Rm_EXT:
6022 case AARCH64_OPND_Rm_SFT:
6023 po_misc_or_fail (parse_shifter_operand
6024 (&str, info, (operands[i] == AARCH64_OPND_Rm_SFT
6025 ? SHIFTED_LOGIC_IMM
6026 : SHIFTED_ARITH_IMM)));
6027 if (!info->shifter.operator_present)
6028 {
6029 /* Default to LSL if not present. Libopcodes prefers shifter
6030 kind to be explicit. */
6031 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6032 info->shifter.kind = AARCH64_MOD_LSL;
6033 /* For Rm_EXT, libopcodes will carry out further check on whether
6034 or not stack pointer is used in the instruction (Recall that
6035 "the extend operator is not optional unless at least one of
6036 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6037 }
6038 break;
6039
6040 case AARCH64_OPND_Fsz:
6041 case AARCH64_OPND_Fd:
6042 case AARCH64_OPND_Fn:
6043 case AARCH64_OPND_Fm:
6044 case AARCH64_OPND_Fa:
6045 case AARCH64_OPND_Ft:
6046 case AARCH64_OPND_Ft2:
6047 case AARCH64_OPND_Sd:
6048 case AARCH64_OPND_Sn:
6049 case AARCH64_OPND_Sm:
6050 case AARCH64_OPND_St:
6051 case AARCH64_OPND_SVE_VZn:
6052 case AARCH64_OPND_SVE_Vd:
6053 case AARCH64_OPND_SVE_Vm:
6054 case AARCH64_OPND_SVE_Vn:
6055 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
6056 if (val == PARSE_FAIL)
6057 {
6058 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6059 goto failure;
6060 }
6061 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
6062
6063 info->reg.regno = val;
6064 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
6065 break;
6066
6067 case AARCH64_OPND_SVE_Pd:
6068 case AARCH64_OPND_SVE_Pg3:
6069 case AARCH64_OPND_SVE_Pg4_5:
6070 case AARCH64_OPND_SVE_Pg4_10:
6071 case AARCH64_OPND_SVE_Pg4_16:
6072 case AARCH64_OPND_SVE_Pm:
6073 case AARCH64_OPND_SVE_Pn:
6074 case AARCH64_OPND_SVE_Pt:
6075 reg_type = REG_TYPE_PN;
6076 goto vector_reg;
6077
6078 case AARCH64_OPND_SVE_Za_5:
6079 case AARCH64_OPND_SVE_Za_16:
6080 case AARCH64_OPND_SVE_Zd:
6081 case AARCH64_OPND_SVE_Zm_5:
6082 case AARCH64_OPND_SVE_Zm_16:
6083 case AARCH64_OPND_SVE_Zn:
6084 case AARCH64_OPND_SVE_Zt:
6085 reg_type = REG_TYPE_ZN;
6086 goto vector_reg;
6087
6088 case AARCH64_OPND_Va:
6089 case AARCH64_OPND_Vd:
6090 case AARCH64_OPND_Vn:
6091 case AARCH64_OPND_Vm:
6092 reg_type = REG_TYPE_VN;
6093 vector_reg:
6094 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6095 if (val == PARSE_FAIL)
6096 {
6097 first_error (_(get_reg_expected_msg (reg_type)));
6098 goto failure;
6099 }
6100 if (vectype.defined & NTA_HASINDEX)
6101 goto failure;
6102
6103 info->reg.regno = val;
6104 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6105 && vectype.type == NT_invtype)
6106 /* Unqualified Pn and Zn registers are allowed in certain
6107 contexts. Rely on F_STRICT qualifier checking to catch
6108 invalid uses. */
6109 info->qualifier = AARCH64_OPND_QLF_NIL;
6110 else
6111 {
6112 info->qualifier = vectype_to_qualifier (&vectype);
6113 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6114 goto failure;
6115 }
6116 break;
6117
6118 case AARCH64_OPND_VdD1:
6119 case AARCH64_OPND_VnD1:
6120 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
6121 if (val == PARSE_FAIL)
6122 {
6123 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6124 goto failure;
6125 }
6126 if (vectype.type != NT_d || vectype.index != 1)
6127 {
6128 set_fatal_syntax_error
6129 (_("the top half of a 128-bit FP/SIMD register is expected"));
6130 goto failure;
6131 }
6132 info->reg.regno = val;
6133 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6134 here; it is correct for the purpose of encoding/decoding since
6135 only the register number is explicitly encoded in the related
6136 instructions, although this appears a bit hacky. */
6137 info->qualifier = AARCH64_OPND_QLF_S_D;
6138 break;
6139
6140 case AARCH64_OPND_SVE_Zm3_INDEX:
6141 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6142 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6143 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6144 case AARCH64_OPND_SVE_Zm4_INDEX:
6145 case AARCH64_OPND_SVE_Zn_INDEX:
6146 reg_type = REG_TYPE_ZN;
6147 goto vector_reg_index;
6148
6149 case AARCH64_OPND_Ed:
6150 case AARCH64_OPND_En:
6151 case AARCH64_OPND_Em:
6152 case AARCH64_OPND_Em16:
6153 case AARCH64_OPND_SM3_IMM2:
6154 reg_type = REG_TYPE_VN;
6155 vector_reg_index:
6156 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6157 if (val == PARSE_FAIL)
6158 {
6159 first_error (_(get_reg_expected_msg (reg_type)));
6160 goto failure;
6161 }
6162 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6163 goto failure;
6164
6165 info->reglane.regno = val;
6166 info->reglane.index = vectype.index;
6167 info->qualifier = vectype_to_qualifier (&vectype);
6168 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6169 goto failure;
6170 break;
6171
6172 case AARCH64_OPND_SVE_ZnxN:
6173 case AARCH64_OPND_SVE_ZtxN:
6174 reg_type = REG_TYPE_ZN;
6175 goto vector_reg_list;
6176
6177 case AARCH64_OPND_LVn:
6178 case AARCH64_OPND_LVt:
6179 case AARCH64_OPND_LVt_AL:
6180 case AARCH64_OPND_LEt:
6181 reg_type = REG_TYPE_VN;
6182 vector_reg_list:
6183 if (reg_type == REG_TYPE_ZN
6184 && get_opcode_dependent_value (opcode) == 1
6185 && *str != '{')
6186 {
6187 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6188 if (val == PARSE_FAIL)
6189 {
6190 first_error (_(get_reg_expected_msg (reg_type)));
6191 goto failure;
6192 }
6193 info->reglist.first_regno = val;
6194 info->reglist.num_regs = 1;
6195 }
6196 else
6197 {
6198 val = parse_vector_reg_list (&str, reg_type, &vectype);
6199 if (val == PARSE_FAIL)
6200 goto failure;
6201
6202 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6203 {
6204 set_fatal_syntax_error (_("invalid register list"));
6205 goto failure;
6206 }
6207
6208 if (vectype.width != 0 && *str != ',')
6209 {
6210 set_fatal_syntax_error
6211 (_("expected element type rather than vector type"));
6212 goto failure;
6213 }
6214
6215 info->reglist.first_regno = (val >> 2) & 0x1f;
6216 info->reglist.num_regs = (val & 0x3) + 1;
6217 }
6218 if (operands[i] == AARCH64_OPND_LEt)
6219 {
6220 if (!(vectype.defined & NTA_HASINDEX))
6221 goto failure;
6222 info->reglist.has_index = 1;
6223 info->reglist.index = vectype.index;
6224 }
6225 else
6226 {
6227 if (vectype.defined & NTA_HASINDEX)
6228 goto failure;
6229 if (!(vectype.defined & NTA_HASTYPE))
6230 {
6231 if (reg_type == REG_TYPE_ZN)
6232 set_fatal_syntax_error (_("missing type suffix"));
6233 goto failure;
6234 }
6235 }
6236 info->qualifier = vectype_to_qualifier (&vectype);
6237 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6238 goto failure;
6239 break;
6240
6241 case AARCH64_OPND_Can:
6242 case AARCH64_OPND_Cam:
6243 case AARCH64_OPND_Cas:
6244 case AARCH64_OPND_Cad:
6245 case AARCH64_OPND_Cat:
6246 case AARCH64_OPND_Cat2:
6247 case AARCH64_OPND_Cat_SYS:
6248 po_reg_or_fail (REG_TYPE_CA_N_Z);
6249 if (opcode->op == OP_MOV_C_ZR && operands[i] == AARCH64_OPND_Can
6250 && val != 31)
6251 {
6252 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CA_Z)));
6253 goto failure;
6254 }
6255 if (val > 31)
6256 {
6257 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CA_N)));
6258 goto failure;
6259 }
6260 info->reg.regno = val;
6261 info->qualifier = AARCH64_OPND_QLF_CA;
6262 break;
6263
6264 case AARCH64_OPND_A64C_CST_REG:
6265 po_reg_or_fail (REG_TYPE_CA_N);
6266 if (val != 29
6267 && (opcode->iclass == br_sealed))
6268 {
6269 set_fatal_syntax_error
6270 (_(N_ ("Capability register c29 expected")));
6271 goto failure;
6272 }
6273 info->reg.regno = val;
6274 info->qualifier = AARCH64_OPND_QLF_CA;
6275 break;
6276
6277 case AARCH64_OPND_Cam_SP:
6278 case AARCH64_OPND_Can_SP:
6279 case AARCH64_OPND_Cad_SP:
6280 po_reg_or_fail (REG_TYPE_CA_N_SP);
6281 info->reg.regno = val;
6282 info->qualifier = AARCH64_OPND_QLF_CA;
6283 break;
6284
6285 case AARCH64_OPND_CRn:
6286 case AARCH64_OPND_CRm:
6287 {
6288 char prefix = *(str++);
6289 if (prefix != 'c' && prefix != 'C')
6290 goto failure;
6291
6292 po_imm_nc_or_fail ();
6293 if (val > 15)
6294 {
6295 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6296 goto failure;
6297 }
6298 info->qualifier = AARCH64_OPND_QLF_CR;
6299 info->imm.value = val;
6300 break;
6301 }
6302
6303 case AARCH64_OPND_SHLL_IMM:
6304 case AARCH64_OPND_IMM_VLSR:
6305 po_imm_or_fail (1, 64);
6306 info->imm.value = val;
6307 break;
6308
6309 case AARCH64_OPND_A64C_IMM8:
6310 case AARCH64_OPND_CCMP_IMM:
6311 case AARCH64_OPND_SIMM5:
6312 case AARCH64_OPND_FBITS:
6313 case AARCH64_OPND_TME_UIMM16:
6314 case AARCH64_OPND_UIMM4:
6315 case AARCH64_OPND_UIMM4_ADDG:
6316 case AARCH64_OPND_UIMM10:
6317 case AARCH64_OPND_UIMM3_OP1:
6318 case AARCH64_OPND_UIMM3_OP2:
6319 case AARCH64_OPND_IMM_VLSL:
6320 case AARCH64_OPND_IMM:
6321 case AARCH64_OPND_IMM_2:
6322 case AARCH64_OPND_WIDTH:
6323 case AARCH64_OPND_SVE_INV_LIMM:
6324 case AARCH64_OPND_SVE_LIMM:
6325 case AARCH64_OPND_SVE_LIMM_MOV:
6326 case AARCH64_OPND_SVE_SHLIMM_PRED:
6327 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6328 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6329 case AARCH64_OPND_SVE_SHRIMM_PRED:
6330 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6331 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6332 case AARCH64_OPND_SVE_SIMM5:
6333 case AARCH64_OPND_SVE_SIMM5B:
6334 case AARCH64_OPND_SVE_SIMM6:
6335 case AARCH64_OPND_SVE_SIMM8:
6336 case AARCH64_OPND_SVE_UIMM3:
6337 case AARCH64_OPND_SVE_UIMM7:
6338 case AARCH64_OPND_SVE_UIMM8:
6339 case AARCH64_OPND_SVE_UIMM8_53:
6340 case AARCH64_OPND_IMM_ROT1:
6341 case AARCH64_OPND_IMM_ROT2:
6342 case AARCH64_OPND_IMM_ROT3:
6343 case AARCH64_OPND_SVE_IMM_ROT1:
6344 case AARCH64_OPND_SVE_IMM_ROT2:
6345 case AARCH64_OPND_SVE_IMM_ROT3:
6346 po_imm_nc_or_fail ();
6347 info->imm.value = val;
6348 break;
6349
6350 case AARCH64_OPND_SVE_AIMM:
6351 case AARCH64_OPND_SVE_ASIMM:
6352 po_imm_nc_or_fail ();
6353 info->imm.value = val;
6354 skip_whitespace (str);
6355 if (skip_past_comma (&str))
6356 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6357 else
6358 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6359 break;
6360
6361 case AARCH64_OPND_SVE_PATTERN:
6362 po_enum_or_fail (aarch64_sve_pattern_array);
6363 info->imm.value = val;
6364 break;
6365
6366 case AARCH64_OPND_SVE_PATTERN_SCALED:
6367 po_enum_or_fail (aarch64_sve_pattern_array);
6368 info->imm.value = val;
6369 if (skip_past_comma (&str)
6370 && !parse_shift (&str, info, SHIFTED_MUL))
6371 goto failure;
6372 if (!info->shifter.operator_present)
6373 {
6374 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6375 info->shifter.kind = AARCH64_MOD_MUL;
6376 info->shifter.amount = 1;
6377 }
6378 break;
6379
6380 case AARCH64_OPND_SVE_PRFOP:
6381 po_enum_or_fail (aarch64_sve_prfop_array);
6382 info->imm.value = val;
6383 break;
6384
6385 case AARCH64_OPND_UIMM7:
6386 po_imm_or_fail (0, 127);
6387 info->imm.value = val;
6388 break;
6389
6390 case AARCH64_OPND_IDX:
6391 case AARCH64_OPND_MASK:
6392 case AARCH64_OPND_BIT_NUM:
6393 case AARCH64_OPND_IMMR:
6394 case AARCH64_OPND_IMMS:
6395 po_imm_or_fail (0, 63);
6396 info->imm.value = val;
6397 break;
6398
6399 case AARCH64_OPND_A64C_IMMV4:
6400 po_imm_nc_or_fail ();
6401 if (val != 4)
6402 {
6403 set_fatal_syntax_error (_("immediate #4 expected"));
6404 goto failure;
6405 }
6406 info->imm.value = 4;
6407 break;
6408
6409 case AARCH64_OPND_IMM0:
6410 po_imm_nc_or_fail ();
6411 if (val != 0)
6412 {
6413 set_fatal_syntax_error (_("immediate zero expected"));
6414 goto failure;
6415 }
6416 info->imm.value = 0;
6417 break;
6418
6419 case AARCH64_OPND_FPIMM0:
6420 {
6421 int qfloat;
6422 bfd_boolean res1 = FALSE, res2 = FALSE;
6423 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6424 it is probably not worth the effort to support it. */
6425 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
6426 imm_reg_type))
6427 && (error_p ()
6428 || !(res2 = parse_constant_immediate (&str, &val,
6429 imm_reg_type))))
6430 goto failure;
6431 if ((res1 && qfloat == 0) || (res2 && val == 0))
6432 {
6433 info->imm.value = 0;
6434 info->imm.is_fp = 1;
6435 break;
6436 }
6437 set_fatal_syntax_error (_("immediate zero expected"));
6438 goto failure;
6439 }
6440
6441 case AARCH64_OPND_IMM_MOV:
6442 {
6443 char *saved = str;
6444 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6445 reg_name_p (str, REG_TYPE_VN))
6446 goto failure;
6447 str = saved;
6448 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6449 GE_OPT_PREFIX, 1));
6450 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6451 later. fix_mov_imm_insn will try to determine a machine
6452 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6453 message if the immediate cannot be moved by a single
6454 instruction. */
6455 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6456 inst.base.operands[i].skip = 1;
6457 }
6458 break;
6459
6460 case AARCH64_OPND_SIMD_IMM:
6461 case AARCH64_OPND_SIMD_IMM_SFT:
6462 if (! parse_big_immediate (&str, &val, imm_reg_type))
6463 goto failure;
6464 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6465 /* addr_off_p */ 0,
6466 /* need_libopcodes_p */ 1,
6467 /* skip_p */ 1);
6468 /* Parse shift.
6469 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6470 shift, we don't check it here; we leave the checking to
6471 the libopcodes (operand_general_constraint_met_p). By
6472 doing this, we achieve better diagnostics. */
6473 if (skip_past_comma (&str)
6474 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6475 goto failure;
6476 if (!info->shifter.operator_present
6477 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6478 {
6479 /* Default to LSL if not present. Libopcodes prefers shifter
6480 kind to be explicit. */
6481 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6482 info->shifter.kind = AARCH64_MOD_LSL;
6483 }
6484 break;
6485
6486 case AARCH64_OPND_FPIMM:
6487 case AARCH64_OPND_SIMD_FPIMM:
6488 case AARCH64_OPND_SVE_FPIMM8:
6489 {
6490 int qfloat;
6491 bfd_boolean dp_p;
6492
6493 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6494 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6495 || !aarch64_imm_float_p (qfloat))
6496 {
6497 if (!error_p ())
6498 set_fatal_syntax_error (_("invalid floating-point"
6499 " constant"));
6500 goto failure;
6501 }
6502 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6503 inst.base.operands[i].imm.is_fp = 1;
6504 }
6505 break;
6506
6507 case AARCH64_OPND_SVE_I1_HALF_ONE:
6508 case AARCH64_OPND_SVE_I1_HALF_TWO:
6509 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6510 {
6511 int qfloat;
6512 bfd_boolean dp_p;
6513
6514 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6515 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6516 {
6517 if (!error_p ())
6518 set_fatal_syntax_error (_("invalid floating-point"
6519 " constant"));
6520 goto failure;
6521 }
6522 inst.base.operands[i].imm.value = qfloat;
6523 inst.base.operands[i].imm.is_fp = 1;
6524 }
6525 break;
6526
6527 case AARCH64_OPND_LIMM:
6528 po_misc_or_fail (parse_shifter_operand (&str, info,
6529 SHIFTED_LOGIC_IMM));
6530 if (info->shifter.operator_present)
6531 {
6532 set_fatal_syntax_error
6533 (_("shift not allowed for bitmask immediate"));
6534 goto failure;
6535 }
6536 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6537 /* addr_off_p */ 0,
6538 /* need_libopcodes_p */ 1,
6539 /* skip_p */ 1);
6540 break;
6541
6542 case AARCH64_OPND_A64C_IMM6_EXT:
6543 po_misc_or_fail (parse_shifter_operand_imm (&str, info,
6544 SHIFTED_ARITH_IMM));
6545
6546 if (inst.reloc.exp.X_op != O_constant)
6547 {
6548 set_fatal_syntax_error (_(inst.reloc.exp.X_op == O_big
6549 ? "immediate out of range"
6550 : "expected constant expression"));
6551 goto failure;
6552 }
6553
6554 /* Try to coerce into shifted form if the immediate is out of
6555 range. */
6556 if (inst.reloc.exp.X_add_number > 63
6557 && (inst.reloc.exp.X_add_number & 0xf) == 0
6558 && (inst.reloc.exp.X_add_number >> 4) <= 63
6559 && info->shifter.amount == 0)
6560 {
6561 info->shifter.amount = 4;
6562 info->shifter.kind = AARCH64_MOD_LSL;
6563 info->imm.value = inst.reloc.exp.X_add_number >> 4;
6564 }
6565 else
6566 info->imm.value = inst.reloc.exp.X_add_number;
6567 break;
6568
6569 case AARCH64_OPND_AIMM:
6570 case AARCH64_OPND_A64C_AIMM:
6571 if (opcode->op == OP_ADD || opcode->op == OP_A64C_ADD)
6572 /* ADD may have relocation types. */
6573 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6574 SHIFTED_ARITH_IMM));
6575 else
6576 po_misc_or_fail (parse_shifter_operand (&str, info,
6577 SHIFTED_ARITH_IMM));
6578 switch (inst.reloc.type)
6579 {
6580 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6581 info->shifter.amount = 12;
6582 break;
6583 case BFD_RELOC_UNUSED:
6584 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6585 if (info->shifter.kind != AARCH64_MOD_NONE)
6586 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6587 inst.reloc.pc_rel = 0;
6588 break;
6589 default:
6590 break;
6591 }
6592 info->imm.value = 0;
6593 if (!info->shifter.operator_present)
6594 {
6595 /* Default to LSL if not present. Libopcodes prefers shifter
6596 kind to be explicit. */
6597 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6598 info->shifter.kind = AARCH64_MOD_LSL;
6599 }
6600 break;
6601
6602 case AARCH64_OPND_HALF:
6603 {
6604 /* #<imm16> or relocation. */
6605 int internal_fixup_p;
6606 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6607 if (internal_fixup_p)
6608 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6609 skip_whitespace (str);
6610 if (skip_past_comma (&str))
6611 {
6612 /* {, LSL #<shift>} */
6613 if (! aarch64_gas_internal_fixup_p ())
6614 {
6615 set_fatal_syntax_error (_("can't mix relocation modifier "
6616 "with explicit shift"));
6617 goto failure;
6618 }
6619 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6620 }
6621 else
6622 inst.base.operands[i].shifter.amount = 0;
6623 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6624 inst.base.operands[i].imm.value = 0;
6625 if (! process_movw_reloc_info ())
6626 goto failure;
6627 }
6628 break;
6629
6630 case AARCH64_OPND_EXCEPTION:
6631 case AARCH64_OPND_UNDEFINED:
6632 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6633 imm_reg_type));
6634 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6635 /* addr_off_p */ 0,
6636 /* need_libopcodes_p */ 0,
6637 /* skip_p */ 1);
6638 break;
6639
6640 case AARCH64_OPND_NZCV:
6641 {
6642 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6643 if (nzcv != NULL)
6644 {
6645 str += 4;
6646 info->imm.value = nzcv->value;
6647 break;
6648 }
6649 po_imm_or_fail (0, 15);
6650 info->imm.value = val;
6651 }
6652 break;
6653
6654 case AARCH64_OPND_PERM:
6655 po_misc_or_fail (parse_perms (&str, info));
6656 break;
6657
6658 case AARCH64_OPND_FORM:
6659 {
6660 char *start = str;
6661 do
6662 str++;
6663 while (ISALPHA (*str));
6664 info->form = get_form_from_str (start, str - start);
6665 if (info->form == NULL)
6666 {
6667 set_syntax_error (_("invalid form"));
6668 goto failure;
6669 }
6670 }
6671 break;
6672
6673 case AARCH64_OPND_COND:
6674 case AARCH64_OPND_COND1:
6675 {
6676 char *start = str;
6677 do
6678 str++;
6679 while (ISALPHA (*str));
6680 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6681 if (info->cond == NULL)
6682 {
6683 set_syntax_error (_("invalid condition"));
6684 goto failure;
6685 }
6686 else if (operands[i] == AARCH64_OPND_COND1
6687 && (info->cond->value & 0xe) == 0xe)
6688 {
6689 /* Do not allow AL or NV. */
6690 set_default_error ();
6691 goto failure;
6692 }
6693 }
6694 break;
6695
6696 /* ADRP variants. Clear the value as operand needs to be
6697 relocated. */
6698 case AARCH64_OPND_A64C_ADDR_ADRDP:
6699 if (!AARCH64_CPU_HAS_FEATURE (cpu_variant, AARCH64_FEATURE_C64))
6700 {
6701 as_bad (_("instruction not allowed on this processor"));
6702 goto failure;
6703 }
6704 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6705 imm_reg_type));
6706 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6707 {
6708 info->imm.value = inst.reloc.exp.X_add_number;
6709 inst.reloc.type = BFD_RELOC_UNUSED;
6710 if (info->imm.value & 0xfff)
6711 goto bad_adrdp;
6712
6713 info->imm.value >>= 12;
6714 break;
6715 }
6716 bad_adrdp:
6717 set_syntax_error
6718 (_("20-bit 4K page aligned integer constant expected"));
6719 goto failure;
6720
6721 case AARCH64_OPND_ADDR_ADRP:
6722 if (AARCH64_CPU_HAS_FEATURE (cpu_variant, AARCH64_FEATURE_C64))
6723 info->imm.value = 1UL << 20;
6724 else
6725 info->imm.value = 0;
6726
6727 po_misc_or_fail (parse_adrp (&str));
6728 break;
6729
6730 case AARCH64_OPND_ADDR_PCREL14:
6731 case AARCH64_OPND_ADDR_PCREL17:
6732 case AARCH64_OPND_ADDR_PCREL19:
6733 case AARCH64_OPND_ADDR_PCREL21:
6734 case AARCH64_OPND_ADDR_PCREL26:
6735 po_misc_or_fail (parse_address (&str, info));
6736 if (!info->addr.pcrel)
6737 {
6738 set_syntax_error (_("invalid pc-relative address"));
6739 goto failure;
6740 }
6741 if (inst.gen_lit_pool
6742 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6743 {
6744 /* Only permit "=value" in the literal load instructions.
6745 The literal will be generated by programmer_friendly_fixup. */
6746 set_syntax_error (_("invalid use of \"=immediate\""));
6747 goto failure;
6748 }
6749 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6750 {
6751 set_syntax_error (_("unrecognized relocation suffix"));
6752 goto failure;
6753 }
6754 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6755 {
6756 info->imm.value = inst.reloc.exp.X_add_number;
6757 inst.reloc.type = BFD_RELOC_UNUSED;
6758 }
6759 else
6760 {
6761 bfd_boolean c64 = AARCH64_CPU_HAS_FEATURE (cpu_variant,
6762 AARCH64_FEATURE_C64);
6763
6764 info->imm.value = 0;
6765 if (inst.reloc.type == BFD_RELOC_UNUSED)
6766 switch (opcode->iclass)
6767 {
6768 case compbranch:
6769 case condbranch:
6770 /* e.g. CBZ or B.COND */
6771 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6772 inst.reloc.type = (c64 ? BFD_RELOC_MORELLO_BRANCH19
6773 : BFD_RELOC_AARCH64_BRANCH19);
6774 break;
6775 case testbranch:
6776 /* e.g. TBZ */
6777 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6778 inst.reloc.type = (c64 ? BFD_RELOC_MORELLO_TSTBR14
6779 : BFD_RELOC_AARCH64_TSTBR14);
6780 break;
6781 case branch_imm:
6782 /* e.g. B or BL */
6783 {
6784 bfd_reloc_code_real_type jump, call;
6785
6786 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6787
6788 jump = (c64 ? BFD_RELOC_MORELLO_JUMP26
6789 : BFD_RELOC_AARCH64_JUMP26);
6790 call = (c64 ? BFD_RELOC_MORELLO_CALL26
6791 : BFD_RELOC_AARCH64_CALL26);
6792
6793 inst.reloc.type = opcode->op == OP_BL ? call : jump;
6794 }
6795 break;
6796 case loadlit:
6797 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19
6798 || operands[i] == AARCH64_OPND_ADDR_PCREL17);
6799 inst.reloc.type = (operands[i] == AARCH64_OPND_ADDR_PCREL19
6800 ? BFD_RELOC_AARCH64_LD_LO19_PCREL
6801 : BFD_RELOC_MORELLO_LD_LO17_PCREL);
6802 break;
6803 case pcreladdr:
6804 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6805 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6806 break;
6807 default:
6808 gas_assert (0);
6809 abort ();
6810 }
6811 if (c64)
6812 inst.reloc.flags = FIXUP_F_C64;
6813 inst.reloc.pc_rel = 1;
6814 }
6815 break;
6816
6817 case AARCH64_OPND_ADDR_SIMPLE:
6818 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6819 {
6820 /* [<Xn|SP>{, #<simm>}] */
6821 char *start = str;
6822 /* First use the normal address-parsing routines, to get
6823 the usual syntax errors. */
6824 po_misc_or_fail (parse_address (&str, info));
6825 if (info->addr.pcrel || info->addr.offset.is_reg
6826 || !info->addr.preind || info->addr.postind
6827 || info->addr.writeback)
6828 {
6829 set_syntax_error (_("invalid addressing mode"));
6830 goto failure;
6831 }
6832
6833 /* Then retry, matching the specific syntax of these addresses. */
6834 str = start;
6835 po_char_or_fail ('[');
6836 po_reg_or_fail (AARCH64_CPU_HAS_FEATURE (cpu_variant,
6837 AARCH64_FEATURE_C64)
6838 ? REG_TYPE_CA_N_SP : REG_TYPE_R64_SP);
6839
6840 /* Accept optional ", #0". */
6841 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6842 && skip_past_char (&str, ','))
6843 {
6844 skip_past_char (&str, '#');
6845 if (! skip_past_char (&str, '0'))
6846 {
6847 set_fatal_syntax_error
6848 (_("the optional immediate offset can only be 0"));
6849 goto failure;
6850 }
6851 }
6852 po_char_or_fail (']');
6853 break;
6854 }
6855
6856 case AARCH64_OPND_CAPADDR_REGOFF:
6857 po_misc_or_fail (parse_cap_address (&str, info, opcode->iclass));
6858 goto regoff_addr;
6859
6860 case AARCH64_OPND_ADDR_REGOFF:
6861 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6862 po_misc_or_fail (parse_address (&str, info));
6863 regoff_addr:
6864 if (info->addr.pcrel || !info->addr.offset.is_reg
6865 || !info->addr.preind || info->addr.postind
6866 || info->addr.writeback)
6867 {
6868 set_syntax_error (_("invalid addressing mode"));
6869 goto failure;
6870 }
6871 if (!info->shifter.operator_present)
6872 {
6873 /* Default to LSL if not present. Libopcodes prefers shifter
6874 kind to be explicit. */
6875 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6876 info->shifter.kind = AARCH64_MOD_LSL;
6877 }
6878 /* Qualifier to be deduced by libopcodes. */
6879 break;
6880
6881 case AARCH64_OPND_CAPADDR_SIMPLE:
6882 case AARCH64_OPND_CAPADDR_SIMM7:
6883 {
6884 /* A little hack to prevent the address parser from trying to
6885 pretend that a BLR with a register may be a BLR with an
6886 address. It fails the addressing mode test below, but still
6887 ends up adding a symbol with the name of the register. */
6888 char *start = str;
6889 po_char_or_fail ('[');
6890 str = start;
6891
6892 po_misc_or_fail (parse_cap_address (&str, info, opcode->iclass));
6893 if (info->addr.pcrel || info->addr.offset.is_reg
6894 || (!info->addr.preind && !info->addr.postind)
6895 || info->addr.writeback)
6896 {
6897 set_syntax_error (_("invalid addressing mode"));
6898 goto failure;
6899 }
6900 if (inst.reloc.type != BFD_RELOC_UNUSED)
6901 {
6902 set_syntax_error (_("relocation not allowed"));
6903 goto failure;
6904 }
6905 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6906 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6907 else
6908 {
6909 set_syntax_error (_("Invalid offset constant"));
6910 goto failure;
6911 }
6912 if (info->type == AARCH64_OPND_CAPADDR_SIMPLE
6913 && info->addr.offset.imm != 0)
6914 {
6915 set_syntax_error (_("non-zero offset not allowed"));
6916 goto failure;
6917 }
6918 break;
6919 }
6920
6921 case AARCH64_OPND_A64C_ADDR_SIMM7:
6922 case AARCH64_OPND_ADDR_SIMM7:
6923 po_misc_or_fail (parse_address (&str, info));
6924 if (info->addr.pcrel || info->addr.offset.is_reg
6925 || (!info->addr.preind && !info->addr.postind))
6926 {
6927 set_syntax_error (_("invalid addressing mode"));
6928 goto failure;
6929 }
6930 if (inst.reloc.type != BFD_RELOC_UNUSED)
6931 {
6932 set_syntax_error (_("relocation not allowed"));
6933 goto failure;
6934 }
6935 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6936 /* addr_off_p */ 1,
6937 /* need_libopcodes_p */ 1,
6938 /* skip_p */ 0);
6939 break;
6940
6941 case AARCH64_OPND_CAPADDR_SIMM9:
6942 po_misc_or_fail (parse_cap_address (&str, info, opcode->iclass));
6943 goto addr_simm;
6944
6945 case AARCH64_OPND_A64C_ADDR_SIMM9:
6946 case AARCH64_OPND_ADDR_SIMM9:
6947 case AARCH64_OPND_ADDR_SIMM9_2:
6948 case AARCH64_OPND_ADDR_SIMM11:
6949 case AARCH64_OPND_ADDR_SIMM13:
6950 po_misc_or_fail (parse_address (&str, info));
6951 addr_simm:
6952 if (info->addr.pcrel || info->addr.offset.is_reg
6953 || (!info->addr.preind && !info->addr.postind)
6954 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6955 && info->addr.writeback))
6956 {
6957 set_syntax_error (_("invalid addressing mode"));
6958 goto failure;
6959 }
6960 if (inst.reloc.type != BFD_RELOC_UNUSED)
6961 {
6962 set_syntax_error (_("relocation not allowed"));
6963 goto failure;
6964 }
6965 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6966 /* addr_off_p */ 1,
6967 /* need_libopcodes_p */ 1,
6968 /* skip_p */ 0);
6969 break;
6970
6971 case AARCH64_OPND_ADDR_SIMM10:
6972 case AARCH64_OPND_ADDR_OFFSET:
6973 po_misc_or_fail (parse_address (&str, info));
6974 if (info->addr.pcrel || info->addr.offset.is_reg
6975 || !info->addr.preind || info->addr.postind)
6976 {
6977 set_syntax_error (_("invalid addressing mode"));
6978 goto failure;
6979 }
6980 if (inst.reloc.type != BFD_RELOC_UNUSED)
6981 {
6982 set_syntax_error (_("relocation not allowed"));
6983 goto failure;
6984 }
6985 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6986 /* addr_off_p */ 1,
6987 /* need_libopcodes_p */ 1,
6988 /* skip_p */ 0);
6989 break;
6990
6991 case AARCH64_OPND_CAPADDR_UIMM9:
6992 po_misc_or_fail (parse_cap_address (&str, info, opcode->iclass));
6993 goto addr_uimm;
6994
6995 case AARCH64_OPND_ADDR_UIMM12:
6996 po_misc_or_fail (parse_address (&str, info));
6997 addr_uimm:
6998 if (info->addr.pcrel || info->addr.offset.is_reg
6999 || !info->addr.preind || info->addr.writeback)
7000 {
7001 set_syntax_error (_("invalid addressing mode"));
7002 goto failure;
7003 }
7004 if (inst.reloc.type == BFD_RELOC_UNUSED)
7005 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7006 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7007 || (inst.reloc.type
7008 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7009 || (inst.reloc.type
7010 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7011 || (inst.reloc.type
7012 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7013 || (inst.reloc.type
7014 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7015 {
7016 /* The altbase ldrb instruction does not have enough range to
7017 accommodate a LO12 relocation. */
7018 if (opcode->flags & F_NOSHIFT && opcode->iclass == ldst_altbase)
7019 {
7020 set_syntax_error (_("relocation not allowed"));
7021 goto failure;
7022 }
7023
7024 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7025 }
7026 else if ((inst.reloc.type == BFD_RELOC_AARCH64_LD_GOT_LO12_NC
7027 || inst.reloc.type == BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC)
7028 && inst.base.operands[0].qualifier == AARCH64_OPND_QLF_CA)
7029 inst.reloc.flags = FIXUP_F_C64;
7030
7031 /* Leave qualifier to be determined by libopcodes. */
7032 break;
7033
7034 case AARCH64_OPND_SIMD_ADDR_POST:
7035 /* [<Xn|SP>], <Xm|#<amount>> */
7036 po_misc_or_fail (parse_address (&str, info));
7037 if (!info->addr.postind || !info->addr.writeback)
7038 {
7039 set_syntax_error (_("invalid addressing mode"));
7040 goto failure;
7041 }
7042 if (!info->addr.offset.is_reg)
7043 {
7044 if (inst.reloc.exp.X_op == O_constant)
7045 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7046 else
7047 {
7048 set_fatal_syntax_error
7049 (_("writeback value must be an immediate constant"));
7050 goto failure;
7051 }
7052 }
7053 /* No qualifier. */
7054 break;
7055
7056 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7057 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7058 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7059 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7060 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7061 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7062 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7063 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7064 case AARCH64_OPND_SVE_ADDR_RI_U6:
7065 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7066 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7067 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7068 /* [X<n>{, #imm, MUL VL}]
7069 [X<n>{, #imm}]
7070 but recognizing SVE registers. */
7071 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7072 &offset_qualifier));
7073 if (base_qualifier != AARCH64_OPND_QLF_X)
7074 {
7075 set_syntax_error (_("invalid addressing mode"));
7076 goto failure;
7077 }
7078 sve_regimm:
7079 if (info->addr.pcrel || info->addr.offset.is_reg
7080 || !info->addr.preind || info->addr.writeback)
7081 {
7082 set_syntax_error (_("invalid addressing mode"));
7083 goto failure;
7084 }
7085 if (inst.reloc.type != BFD_RELOC_UNUSED
7086 || inst.reloc.exp.X_op != O_constant)
7087 {
7088 /* Make sure this has priority over
7089 "invalid addressing mode". */
7090 set_fatal_syntax_error (_("constant offset required"));
7091 goto failure;
7092 }
7093 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7094 break;
7095
7096 case AARCH64_OPND_SVE_ADDR_R:
7097 /* [<Xn|SP>{, <R><m>}]
7098 but recognizing SVE registers. */
7099 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7100 &offset_qualifier));
7101 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7102 {
7103 offset_qualifier = AARCH64_OPND_QLF_X;
7104 info->addr.offset.is_reg = 1;
7105 info->addr.offset.regno = 31;
7106 }
7107 else if (base_qualifier != AARCH64_OPND_QLF_X
7108 || offset_qualifier != AARCH64_OPND_QLF_X)
7109 {
7110 set_syntax_error (_("invalid addressing mode"));
7111 goto failure;
7112 }
7113 goto regoff_addr;
7114
7115 case AARCH64_OPND_SVE_ADDR_RR:
7116 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7117 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7118 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7119 case AARCH64_OPND_SVE_ADDR_RX:
7120 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7121 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7122 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7123 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7124 but recognizing SVE registers. */
7125 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7126 &offset_qualifier));
7127 if (base_qualifier != AARCH64_OPND_QLF_X
7128 || offset_qualifier != AARCH64_OPND_QLF_X)
7129 {
7130 set_syntax_error (_("invalid addressing mode"));
7131 goto failure;
7132 }
7133 goto regoff_addr;
7134
7135 case AARCH64_OPND_SVE_ADDR_RZ:
7136 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7137 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7138 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7139 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7140 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7141 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7142 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7143 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7144 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7145 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7146 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7147 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7148 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7149 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7150 &offset_qualifier));
7151 if (base_qualifier != AARCH64_OPND_QLF_X
7152 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7153 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7154 {
7155 set_syntax_error (_("invalid addressing mode"));
7156 goto failure;
7157 }
7158 info->qualifier = offset_qualifier;
7159 goto regoff_addr;
7160
7161 case AARCH64_OPND_SVE_ADDR_ZX:
7162 /* [Zn.<T>{, <Xm>}]. */
7163 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7164 &offset_qualifier));
7165 /* Things to check:
7166 base_qualifier either S_S or S_D
7167 offset_qualifier must be X
7168 */
7169 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7170 && base_qualifier != AARCH64_OPND_QLF_S_D)
7171 || offset_qualifier != AARCH64_OPND_QLF_X)
7172 {
7173 set_syntax_error (_("invalid addressing mode"));
7174 goto failure;
7175 }
7176 info->qualifier = base_qualifier;
7177 if (!info->addr.offset.is_reg || info->addr.pcrel
7178 || !info->addr.preind || info->addr.writeback
7179 || info->shifter.operator_present != 0)
7180 {
7181 set_syntax_error (_("invalid addressing mode"));
7182 goto failure;
7183 }
7184 info->shifter.kind = AARCH64_MOD_LSL;
7185 break;
7186
7187
7188 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7189 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7190 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7191 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7192 /* [Z<n>.<T>{, #imm}] */
7193 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7194 &offset_qualifier));
7195 if (base_qualifier != AARCH64_OPND_QLF_S_S
7196 && base_qualifier != AARCH64_OPND_QLF_S_D)
7197 {
7198 set_syntax_error (_("invalid addressing mode"));
7199 goto failure;
7200 }
7201 info->qualifier = base_qualifier;
7202 goto sve_regimm;
7203
7204 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7205 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7206 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7207 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7208 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7209
7210 We don't reject:
7211
7212 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7213
7214 here since we get better error messages by leaving it to
7215 the qualifier checking routines. */
7216 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7217 &offset_qualifier));
7218 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7219 && base_qualifier != AARCH64_OPND_QLF_S_D)
7220 || offset_qualifier != base_qualifier)
7221 {
7222 set_syntax_error (_("invalid addressing mode"));
7223 goto failure;
7224 }
7225 info->qualifier = base_qualifier;
7226 goto regoff_addr;
7227
7228 case AARCH64_OPND_SYSREG:
7229 {
7230 uint32_t sysreg_flags;
7231
7232 if ((val = parse_sys_reg (opcode, &str, aarch64_sys_regs_hsh, 1, 0,
7233 &sysreg_flags)) == PARSE_FAIL)
7234 {
7235 set_syntax_error (_("unknown or missing system register name"));
7236 goto failure;
7237 }
7238 inst.base.operands[i].sysreg.value = val;
7239 inst.base.operands[i].sysreg.flags = sysreg_flags;
7240 break;
7241 }
7242
7243 case AARCH64_OPND_PSTATEFIELD:
7244 if ((val = parse_sys_reg (opcode, &str, aarch64_pstatefield_hsh, 0, 1,
7245 NULL))
7246 == PARSE_FAIL)
7247 {
7248 set_syntax_error (_("unknown or missing PSTATE field name"));
7249 goto failure;
7250 }
7251 inst.base.operands[i].pstatefield = val;
7252 break;
7253
7254 case AARCH64_OPND_SYSREG_IC:
7255 inst.base.operands[i].sysins_op =
7256 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7257 goto sys_reg_ins;
7258
7259 case AARCH64_OPND_SYSREG_DC:
7260 inst.base.operands[i].sysins_op =
7261 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7262 goto sys_reg_ins;
7263
7264 case AARCH64_OPND_SYSREG_AT:
7265 inst.base.operands[i].sysins_op =
7266 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7267 goto sys_reg_ins;
7268
7269 case AARCH64_OPND_SYSREG_SR:
7270 inst.base.operands[i].sysins_op =
7271 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7272 goto sys_reg_ins;
7273
7274 case AARCH64_OPND_SYSREG_TLBI:
7275 inst.base.operands[i].sysins_op =
7276 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7277 sys_reg_ins:
7278 if (inst.base.operands[i].sysins_op == NULL)
7279 {
7280 set_fatal_syntax_error ( _("unknown or missing operation name"));
7281 goto failure;
7282 }
7283 break;
7284
7285 case AARCH64_OPND_BARRIER:
7286 case AARCH64_OPND_BARRIER_ISB:
7287 val = parse_barrier (&str);
7288 if (val != PARSE_FAIL
7289 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7290 {
7291 /* ISB only accepts options name 'sy'. */
7292 set_syntax_error
7293 (_("the specified option is not accepted in ISB"));
7294 /* Turn off backtrack as this optional operand is present. */
7295 backtrack_pos = 0;
7296 goto failure;
7297 }
7298 /* This is an extension to accept a 0..15 immediate. */
7299 if (val == PARSE_FAIL)
7300 po_imm_or_fail (0, 15);
7301 info->barrier = aarch64_barrier_options + val;
7302 break;
7303
7304 case AARCH64_OPND_PRFOP:
7305 val = parse_pldop (&str);
7306 /* This is an extension to accept a 0..31 immediate. */
7307 if (val == PARSE_FAIL)
7308 po_imm_or_fail (0, 31);
7309 inst.base.operands[i].prfop = aarch64_prfops + val;
7310 break;
7311
7312 case AARCH64_OPND_BARRIER_PSB:
7313 val = parse_barrier_psb (&str, &(info->hint_option));
7314 if (val == PARSE_FAIL)
7315 goto failure;
7316 break;
7317
7318 case AARCH64_OPND_BTI_TARGET:
7319 val = parse_bti_operand (&str, &(info->hint_option));
7320 if (val == PARSE_FAIL)
7321 goto failure;
7322 break;
7323
7324 default:
7325 as_fatal (_("unhandled operand code %d"), operands[i]);
7326 }
7327
7328 /* If we get here, this operand was successfully parsed. */
7329 inst.base.operands[i].present = 1;
7330 continue;
7331
7332 failure:
7333 /* The parse routine should already have set the error, but in case
7334 not, set a default one here. */
7335 if (! error_p ())
7336 set_default_error ();
7337
7338 if (! backtrack_pos)
7339 goto parse_operands_return;
7340
7341 {
7342 /* We reach here because this operand is marked as optional, and
7343 either no operand was supplied or the operand was supplied but it
7344 was syntactically incorrect. In the latter case we report an
7345 error. In the former case we perform a few more checks before
7346 dropping through to the code to insert the default operand. */
7347
7348 char *tmp = backtrack_pos;
7349 char endchar = END_OF_INSN;
7350
7351 if (i != (aarch64_num_of_operands (opcode) - 1))
7352 endchar = ',';
7353 skip_past_char (&tmp, ',');
7354
7355 if (*tmp != endchar)
7356 /* The user has supplied an operand in the wrong format. */
7357 goto parse_operands_return;
7358
7359 /* Make sure there is not a comma before the optional operand.
7360 For example the fifth operand of 'sys' is optional:
7361
7362 sys #0,c0,c0,#0, <--- wrong
7363 sys #0,c0,c0,#0 <--- correct. */
7364 if (comma_skipped_p && i && endchar == END_OF_INSN)
7365 {
7366 set_fatal_syntax_error
7367 (_("unexpected comma before the omitted optional operand"));
7368 goto parse_operands_return;
7369 }
7370 }
7371
7372 /* Reaching here means we are dealing with an optional operand that is
7373 omitted from the assembly line. */
7374 gas_assert (optional_operand_p (opcode, i));
7375 info->present = 0;
7376 process_omitted_operand (operands[i], opcode, i, info);
7377
7378 /* Try again, skipping the optional operand at backtrack_pos. */
7379 str = backtrack_pos;
7380 backtrack_pos = 0;
7381
7382 /* Clear any error record after the omitted optional operand has been
7383 successfully handled. */
7384 clear_error ();
7385 }
7386
7387 /* Check if we have parsed all the operands. */
7388 if (*str != '\0' && ! error_p ())
7389 {
7390 /* Set I to the index of the last present operand; this is
7391 for the purpose of diagnostics. */
7392 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7393 ;
7394 set_fatal_syntax_error
7395 (_("unexpected characters following instruction"));
7396 }
7397
7398 parse_operands_return:
7399
7400 if (error_p ())
7401 {
7402 DEBUG_TRACE ("parsing FAIL: %s - %s",
7403 operand_mismatch_kind_names[get_error_kind ()],
7404 get_error_message ());
7405 /* Record the operand error properly; this is useful when there
7406 are multiple instruction templates for a mnemonic name, so that
7407 later on, we can select the error that most closely describes
7408 the problem. */
7409 record_operand_error (opcode, i, get_error_kind (),
7410 get_error_message ());
7411 return FALSE;
7412 }
7413 else
7414 {
7415 DEBUG_TRACE ("parsing SUCCESS");
7416 return TRUE;
7417 }
7418 }
7419
7420 /* It does some fix-up to provide some programmer friendly feature while
7421 keeping the libopcodes happy, i.e. libopcodes only accepts
7422 the preferred architectural syntax.
7423 Return FALSE if there is any failure; otherwise return TRUE. */
7424
7425 static bfd_boolean
7426 programmer_friendly_fixup (aarch64_instruction *instr)
7427 {
7428 aarch64_inst *base = &instr->base;
7429 const aarch64_opcode *opcode = base->opcode;
7430 enum aarch64_op op = opcode->op;
7431 aarch64_opnd_info *operands = base->operands;
7432
7433 DEBUG_TRACE ("enter");
7434
7435 switch (opcode->iclass)
7436 {
7437 case testbranch:
7438 /* TBNZ Xn|Wn, #uimm6, label
7439 Test and Branch Not Zero: conditionally jumps to label if bit number
7440 uimm6 in register Xn is not zero. The bit number implies the width of
7441 the register, which may be written and should be disassembled as Wn if
7442 uimm is less than 32. */
7443 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
7444 {
7445 if (operands[1].imm.value >= 32)
7446 {
7447 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
7448 0, 31);
7449 return FALSE;
7450 }
7451 operands[0].qualifier = AARCH64_OPND_QLF_X;
7452 }
7453 break;
7454 case loadlit:
7455 /* LDR Wt, label | =value
7456 As a convenience assemblers will typically permit the notation
7457 "=value" in conjunction with the pc-relative literal load instructions
7458 to automatically place an immediate value or symbolic address in a
7459 nearby literal pool and generate a hidden label which references it.
7460 ISREG has been set to 0 in the case of =value. */
7461 if (instr->gen_lit_pool
7462 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT
7463 || op == OP_LDR_LIT_2))
7464 {
7465 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
7466 if (op == OP_LDRSW_LIT)
7467 size = 4;
7468 if (instr->reloc.exp.X_op != O_constant
7469 && instr->reloc.exp.X_op != O_big
7470 && instr->reloc.exp.X_op != O_symbol)
7471 {
7472 record_operand_error (opcode, 1,
7473 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
7474 _("constant expression expected"));
7475 return FALSE;
7476 }
7477 if (! add_to_lit_pool (&instr->reloc.exp, size))
7478 {
7479 record_operand_error (opcode, 1,
7480 AARCH64_OPDE_OTHER_ERROR,
7481 _("literal pool insertion failed"));
7482 return FALSE;
7483 }
7484 }
7485 break;
7486 case log_shift:
7487 case bitfield:
7488 /* UXT[BHW] Wd, Wn
7489 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
7490 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
7491 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
7492 A programmer-friendly assembler should accept a destination Xd in
7493 place of Wd, however that is not the preferred form for disassembly.
7494 */
7495 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
7496 && operands[1].qualifier == AARCH64_OPND_QLF_W
7497 && operands[0].qualifier == AARCH64_OPND_QLF_X)
7498 operands[0].qualifier = AARCH64_OPND_QLF_W;
7499 break;
7500
7501 case addsub_ext:
7502 {
7503 /* In the 64-bit form, the final register operand is written as Wm
7504 for all but the (possibly omitted) UXTX/LSL and SXTX
7505 operators.
7506 As a programmer-friendly assembler, we accept e.g.
7507 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
7508 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
7509 int idx = aarch64_operand_index (opcode->operands,
7510 AARCH64_OPND_Rm_EXT);
7511 gas_assert (idx == 1 || idx == 2);
7512 if (operands[0].qualifier == AARCH64_OPND_QLF_X
7513 && operands[idx].qualifier == AARCH64_OPND_QLF_X
7514 && operands[idx].shifter.kind != AARCH64_MOD_LSL
7515 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
7516 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
7517 operands[idx].qualifier = AARCH64_OPND_QLF_W;
7518 }
7519 break;
7520
7521 default:
7522 break;
7523 }
7524
7525 DEBUG_TRACE ("exit with SUCCESS");
7526 return TRUE;
7527 }
7528
7529 /* Check for loads and stores that will cause unpredictable behavior. */
7530
7531 static void
7532 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
7533 {
7534 aarch64_inst *base = &instr->base;
7535 const aarch64_opcode *opcode = base->opcode;
7536 const aarch64_opnd_info *opnds = base->operands;
7537 switch (opcode->iclass)
7538 {
7539 case ldst_pos:
7540 case ldst_imm9:
7541 case ldst_imm10:
7542 case ldst_unscaled:
7543 case ldst_unpriv:
7544 /* Loading/storing the base register is unpredictable if writeback. */
7545 if ((aarch64_get_operand_class (opnds[0].type)
7546 == AARCH64_OPND_CLASS_INT_REG)
7547 && opnds[0].reg.regno == opnds[1].addr.base_regno
7548 && opnds[1].addr.base_regno != REG_SP
7549 /* Exempt STG/STZG/ST2G/STZ2G. */
7550 && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
7551 && opnds[1].addr.writeback)
7552 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
7553 break;
7554
7555 case ldstpair_off:
7556 case ldstnapair_offs:
7557 case ldstpair_indexed:
7558 /* Loading/storing the base register is unpredictable if writeback. */
7559 if ((aarch64_get_operand_class (opnds[0].type)
7560 == AARCH64_OPND_CLASS_INT_REG)
7561 && (opnds[0].reg.regno == opnds[2].addr.base_regno
7562 || opnds[1].reg.regno == opnds[2].addr.base_regno)
7563 && opnds[2].addr.base_regno != REG_SP
7564 /* Exempt STGP. */
7565 && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
7566 && opnds[2].addr.writeback)
7567 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
7568 /* Load operations must load different registers. */
7569 if ((opcode->opcode & (1 << 22))
7570 && opnds[0].reg.regno == opnds[1].reg.regno)
7571 as_warn (_("unpredictable load of register pair -- `%s'"), str);
7572 break;
7573
7574 case ldstexcl:
7575 /* It is unpredictable if the destination and status registers are the
7576 same. */
7577 if ((aarch64_get_operand_class (opnds[0].type)
7578 == AARCH64_OPND_CLASS_INT_REG)
7579 && (aarch64_get_operand_class (opnds[1].type)
7580 == AARCH64_OPND_CLASS_INT_REG
7581 || (aarch64_get_operand_class (opnds[1].type)
7582 == AARCH64_OPND_CLASS_CAP_REG))
7583 && (opnds[0].reg.regno == opnds[1].reg.regno
7584 || opnds[0].reg.regno == opnds[2].reg.regno))
7585 as_warn (_("unpredictable: identical transfer and status registers"
7586 " --`%s'"),
7587 str);
7588
7589 break;
7590
7591 default:
7592 break;
7593 }
7594 }
7595
7596 static void
7597 force_automatic_sequence_close (void)
7598 {
7599 if (now_instr_sequence.instr)
7600 {
7601 as_warn (_("previous `%s' sequence has not been closed"),
7602 now_instr_sequence.instr->opcode->name);
7603 init_insn_sequence (NULL, &now_instr_sequence);
7604 }
7605 }
7606
7607 /* A wrapper function to interface with libopcodes on encoding and
7608 record the error message if there is any.
7609
7610 Return TRUE on success; otherwise return FALSE. */
7611
7612 static bfd_boolean
7613 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7614 aarch64_insn *code)
7615 {
7616 aarch64_operand_error error_info;
7617 memset (&error_info, '\0', sizeof (error_info));
7618 error_info.kind = AARCH64_OPDE_NIL;
7619 if (aarch64_opcode_encode (cpu_variant, opcode, instr, code, NULL,
7620 &error_info, insn_sequence)
7621 && !error_info.non_fatal)
7622 return TRUE;
7623
7624 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7625 record_operand_error_info (opcode, &error_info);
7626 return error_info.non_fatal;
7627 }
7628
7629 #ifdef DEBUG_AARCH64
7630 static inline void
7631 dump_opcode_operands (const aarch64_opcode *opcode)
7632 {
7633 int i = 0;
7634 while (opcode->operands[i] != AARCH64_OPND_NIL)
7635 {
7636 aarch64_verbose ("\t\t opnd%d: %s", i,
7637 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7638 ? aarch64_get_operand_name (opcode->operands[i])
7639 : aarch64_get_operand_desc (opcode->operands[i]));
7640 ++i;
7641 }
7642 }
7643 #endif /* DEBUG_AARCH64 */
7644
7645 /* With the introduction of Morello, some CORE_INSNs are no longer
7646 valid if IS_C64 is true. It is important that such instructions
7647 are no longer treated as core in such contexts and are
7648 disconsidered, rather being treated as belonging to any other
7649 unavailable architectural extension. Likewise, reject purecap-specific
7650 instructions when assembling for hybrid (or any other) tartgets. */
7651
7652 static bfd_boolean
7653 validate_opcode_for_feature (const aarch64_opcode *opcode,
7654 aarch64_feature_set features)
7655 {
7656 /* If opcode is memory-related, Ensure this CPU does not impose any
7657 restriction on allowed operands. */
7658 if (opcode->flags & F_NONC64
7659 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_C64))
7660 {
7661 set_default_error ();
7662 return FALSE;
7663 }
7664 /* Reject purecap-specific instructions when assembling for any other
7665 target. */
7666 if (opcode->flags & F_C64ONLY
7667 && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_C64)))
7668 {
7669 set_default_error ();
7670 return FALSE;
7671 }
7672 return TRUE;
7673 }
7674
7675 /* This is the guts of the machine-dependent assembler. STR points to a
7676 machine dependent instruction. This function is supposed to emit
7677 the frags/bytes it assembles to. */
7678
void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Look up the mnemonic; on success P is advanced past it.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_CUR_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed from the mnemonic across the reset of
     the global instruction state.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_CUR_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Validate, parse and encode; fall through to the next template on
	 any failure.  */
      if (validate_opcode_for_feature (opcode, cpu_variant)
	  && parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, TRUE);
	  return;
	}

      /* This template failed; retry with the next one, starting from a
	 clean instruction state.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, FALSE);
}
7815
7816 /* Various frobbings of labels and their addresses. */
7817
void
aarch64_start_line_hook (void)
{
  /* Forget the label seen on the previous line; md_assemble only
     re-aligns a label recorded for the current line.  */
  last_label_seen = NULL;
}
7823
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  /* Tag the symbol with the current instruction set (C64 vs. A64).  */
  AARCH64_SET_C64 (sym, IS_C64);
  if (AARCH64_IS_C64 (sym) && S_IS_FUNCTION (sym))
    {
      /* C64 function symbols carry the LSB set in their value (similar
	 in spirit to the AArch32 Thumb bit).  It must not already be
	 set when we get here.  */
      gas_assert ((*symbol_X_add_number (sym) & 1) == 0);
      *symbol_X_add_number (sym) += 1;
    }

  dwarf2_emit_label (sym);
}
7838
/* Per-section hook: close any still-open automatic instruction
   sequence before the section is finalized.  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
7845
7846 int
7847 aarch64_data_in_code (void)
7848 {
7849 if (!strncmp (input_line_pointer + 1, "data:", 5))
7850 {
7851 *input_line_pointer = '/';
7852 input_line_pointer += 5;
7853 *input_line_pointer = 0;
7854 return 1;
7855 }
7856
7857 return 0;
7858 }
7859
/* Strip a trailing "/data" suffix from NAME in place (the counterpart
   of the marker recognised by aarch64_data_in_code) and return NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  /* Use size_t for the strlen result; storing it in an int risks a
     narrowing conversion (and a -Wconversion warning).  */
  size_t len = strlen (name);

  /* Only strip when something precedes the suffix; a bare "/data"
     name is left untouched.  */
  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
7870 \f
7871 /* Table of all register names defined by default. The user can
7872 define additional names with .req. Note that all register names
7873 should appear in both upper and lowercase variants. Some registers
7874 also have mixed-case names. */
7875
/* REGDEF defines a primary register name (final field TRUE);
   REGDEF_ALIAS defines an alternative spelling (final field FALSE).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
/* REGNUM defines the name P##N with register number N.  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Register ranges: 0..15, 0..30 and 0..31 respectively.  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  x31/w31 are not defined here; the SP and ZR
     spellings below cover number 31.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Capability Registers.  Note DDC is given number 33, outside the
     c0-c30/csp/czr numbering.  */
  REGSET31 (c, CA_N), REGSET31 (C, CA_N),
  REGDEF (csp, 31, CA_SP), REGDEF (CSP, 31, CA_SP),
  REGDEF (czr, 31, CA_Z), REGDEF (CZR, 31, CA_Z),
  REGDEF (ddc, 33, CA_D), REGDEF (DDC, 33, CA_D),
  REGDEF_ALIAS (clr, 30, CA_N), REGDEF_ALIAS (CLR, 30, CA_N),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  Only p0-p15 exist.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7947
/* One macro per condition flag: the upper-case letter means the flag
   bit is set (1) in the immediate, lower-case means clear (0).  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the four flags into the 4-bit NZCV immediate:
   N is bit 3, Z bit 2, C bit 1, V bit 0.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All sixteen case-spellings of the "nzcv" operand name, each mapped
   to the corresponding 4-bit immediate.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7985 \f
7986 /* MD interface: bits in the object file. */
7987
7988 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7989 for use in the a.out file, and stores them in the array pointed to by buf.
7990 This knows about the endian-ness of the target machine and does
7991 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7992 2 (short) and 4 (long) Floating numbers are put out as a series of
7993 LITTLENUMS (shorts, here at least). */
7994
7995 void
7996 md_number_to_chars (char *buf, valueT val, int n)
7997 {
7998 if (target_big_endian)
7999 number_to_chars_bigendian (buf, val, n);
8000 else
8001 number_to_chars_littleendian (buf, val, n);
8002 }
8003
8004 /* MD interface: Sections. */
8005
8006 /* Estimate the size of a frag before relaxing. Assume everything fits in
8007 4 bytes. */
8008
int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Assume everything fits in 4 bytes (one instruction word).  */
  fragp->fr_var = 4;
  return 4;
}
8015
8016 /* Round up a section size to the appropriate boundary. */
8017
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No rounding needed: section sizes are kept as-is.  */
  return size;
}
8023
8024 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
8025 of an rs_align_code fragment.
8026
8027 Here we fill the frag with the appropriate info for padding the
8028 output stream. The resulting frag will consist of a fixed (fr_fix)
8029 and of a repeating (fr_var) part.
8030
8031 The fixed content is always emitted before the repeating content and
8032 these two parts are used as follows in constructing the output:
8033 - the fixed part will be used to align to a valid instruction word
8034 boundary, in case that we start at a misaligned address; as no
8035 executable instruction can live at the misaligned location, we
8036 simply fill with zeros;
8037 - the variable part will be used to cover the remaining padding and
8038 we fill using the AArch64 NOP instruction.
8039
8040 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
8041 enough storage space for up to 3 bytes for padding the back to a valid
8042 instruction alignment and exactly 4 bytes to store the NOP pattern. */
8043
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed to reach the next frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Zero-fill up to the next instruction-word boundary (the fixed
     part); those bytes cannot hold an instruction, so they are marked
     as data for the mapping symbols.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_CUR_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The variable (repeating) part is the NOP pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8081
8082 /* Perform target specific initialisation of a frag.
8083 Note - despite the name this initialisation is not done when the frag
8084 is created, but only when its type is assigned. A frag can be created
8085 and used a long time before its type is set, so beware of assuming that
8086 this initialisation is performed first. */
8087
8088 #ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets; mapping-state bookkeeping is
     only done in the ELF variant below.  */
}
8094
8095 #else /* OBJ_ELF is defined. */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state from the frag type.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_CUR_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_CUR_INSN, max_chars);
      break;
    default:
      break;
    }
}
8127 \f
8128 /* Initialize the DWARF-2 unwind information for this procedure. */
8129
8130 void
8131 tc_aarch64_frame_initial_instructions (void)
8132 {
8133 if (IS_C64)
8134 {
8135 cfi_set_return_column (REG_DW_CLR);
8136 cfi_add_CFA_def_cfa (REG_DW_CSP, 0);
8137 }
8138 else
8139 cfi_add_CFA_def_cfa (REG_SP, 0);
8140 }
8141
8142
8143 /* The extra initialisation steps needed by AArch64 in alloc_fde_entry.
8144 Currently only used to initialise the key used to sign the return
8145 address. */
8146 void
8147 tc_aarch64_fde_entry_init_extra(struct fde_entry *fde)
8148 {
8149 fde->entry_extras.pauth_key = AARCH64_PAUTH_KEY_A;
8150 fde->entry_extras.c64 = IS_C64;
8151 }
8152
8153 bfd_boolean
8154 tc_aarch64_cfi_startproc_exp (const char *arg)
8155 {
8156 /* Allow purecap only for C64 functions. */
8157 if (!strcmp ("purecap", arg) && IS_C64)
8158 return TRUE;
8159
8160 return FALSE;
8161 }
8162
8163 #endif /* OBJ_ELF */
8164
8165 /* Convert REGNAME to a DWARF-2 register number. */
8166
8167 int
8168 tc_aarch64_regname_to_dw2regnum (char *regname)
8169 {
8170 const reg_entry *reg = parse_reg (&regname);
8171 if (reg == NULL)
8172 return -1;
8173
8174 switch (reg->type)
8175 {
8176 case REG_TYPE_SP_32:
8177 case REG_TYPE_SP_64:
8178 case REG_TYPE_R_32:
8179 case REG_TYPE_R_64:
8180 return reg->number;
8181
8182 case REG_TYPE_FP_B:
8183 case REG_TYPE_FP_H:
8184 case REG_TYPE_FP_S:
8185 case REG_TYPE_FP_D:
8186 case REG_TYPE_FP_Q:
8187 return reg->number + 64;
8188
8189 case REG_TYPE_CA_N:
8190 case REG_TYPE_CA_SP:
8191 case REG_TYPE_CA_D:
8192 return reg->number + 198;
8193
8194 default:
8195 break;
8196 }
8197 return -1;
8198 }
8199
8200 /* Implement DWARF2_ADDR_SIZE. */
8201
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses even on a 64-bit architecture.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
8211
8212 /* MD interface: Symbol and relocation handling. */
8213
8214 /* Return the address within the segment that a PC-relative fixup is
8215 relative to. For AArch64 PC-relative fixups applied to instructions
8216 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8217
8218 long
8219 md_pcrel_from_section (fixS * fixP, segT seg)
8220 {
8221 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8222
8223 /* If this is pc-relative and we are going to emit a relocation
8224 then we just want to put out any pipeline compensation that the linker
8225 will need. Otherwise we want to use the calculated base. */
8226 if (fixP->fx_pcrel
8227 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8228 || aarch64_force_relocation (fixP)))
8229 base = 0;
8230
8231 /* AArch64 should be consistent for all pc-relative relocations. */
8232 return base + AARCH64_PCREL_OFFSET;
8233 }
8234
8235 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
8236 Otherwise we have no need to default values of symbols. */
8237
symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character check before the full string comparison.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      /* Create the GOT symbol lazily, exactly once.  */
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
8260
8261 /* Return non-zero if the indicated VALUE has overflowed the maximum
8262 range expressible by a unsigned number with the indicated number of
8263 BITS. */
8264
8265 static bfd_boolean
8266 unsigned_overflow (valueT value, unsigned bits)
8267 {
8268 valueT lim;
8269 if (bits >= sizeof (valueT) * 8)
8270 return FALSE;
8271 lim = (valueT) 1 << bits;
8272 return (value >= lim);
8273 }
8274
8275
8276 /* Return non-zero if the indicated VALUE has overflowed the maximum
8277 range expressible by an signed number with the indicated number of
8278 BITS. */
8279
8280 static bfd_boolean
8281 signed_overflow (offsetT value, unsigned bits)
8282 {
8283 offsetT lim;
8284 if (bits >= sizeof (offsetT) * 8)
8285 return FALSE;
8286 lim = (offsetT) 1 << (bits - 1);
8287 return (value < -lim || value >= lim);
8288 }
8289
8290 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
8291 unsigned immediate offset load/store instruction, try to encode it as
8292 an unscaled, 9-bit, signed immediate offset load/store instruction.
8293 Return TRUE if it is successful; otherwise return FALSE.
8294
8295 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
8296 in response to the standard LDR/STR mnemonics when the immediate offset is
8297 unambiguous, i.e. when it is negative or unaligned. */
8298
static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;
  enum aarch64_opnd target;

  gas_assert (instr->opcode->iclass == ldst_pos
	      || instr->opcode->iclass == ldst_altbase);

  /* The address operand kind of the unscaled form depends on the
     instruction class (normal vs. alternative-base).  */
  target = (instr->opcode->iclass == ldst_pos ? AARCH64_OPND_ADDR_SIMM9
	    : AARCH64_OPND_CAPADDR_SIMM9);

  /* Map the scaled opcode to its unscaled (LDUR/STUR-style)
     counterpart; OP_NIL means no such counterpart exists.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_LDRH_POS_A: new_op = OP_LDURH_A; break;

    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    case OP_LDR_POS_C: new_op = OP_LDUR_C; break;
    case OP_STR_POS_C: new_op = OP_STUR_C; break;
    case OP_LDRB_POS_A:new_op = OP_LDURB_A; break;
    case OP_STRB_POS_A: new_op = OP_STURB_A; break;
    case OP_LDR_POS_AC: new_op = OP_LDUR_AC; break;
    case OP_LDR_POS_AX: new_op = OP_LDUR_AX; break;
    case OP_STR_POS_AC: new_op = OP_STUR_AC; break;
    case OP_STR_POS_AX: new_op = OP_STUR_AX; break;

    case OP_LDRFP_POS_A: new_op = OP_LDURFP_A; break;
    case OP_LDRFPQ_POS_A: new_op = OP_LDURFPQ_POS_A; break;
    case OP_STRFP_POS_A: new_op = OP_STURFP_POS_A; break;
    case OP_STRFPQ_POS_A: new_op = OP_STURFPQ_POS_A; break;

    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the address operand's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       target);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDUR entry to encode programmer-friendly LDR");

  /* Re-encode with the unscaled opcode; failure here means the offset
     does not fit the 9-bit signed form either.  */
  if (!aarch64_opcode_encode (cpu_variant, instr->opcode, instr, &instr->value,
			      NULL, NULL, insn_sequence))
    return FALSE;

  return TRUE;
}
8373
8374 /* Called by fix_insn to fix a MOV immediate alias instruction.
8375
8376 Operand for a generic move immediate instruction, which is an alias
8377 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
8378 a 32-bit/64-bit immediate value into general register. An assembler error
8379 shall result if the immediate cannot be created by a single one of these
8380 instructions. If there is a choice, then to ensure reversability an
8381 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
8382
static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias first, to keep disassembly reversible.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (cpu_variant, instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (cpu_variant, instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (cpu_variant, instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
8434
8435 /* An instruction operand which is immediate related may have symbol used
8436 in the assembly, e.g.
8437
8438 mov w0, u32
8439 .set u32, 0x00ffff00
8440
8441 At the time when the assembly instruction is parsed, a referenced symbol,
8442 like 'u32' in the above example may not have been seen; a fixS is created
8443 in such a case and is handled here after symbols have been resolved.
8444 Instruction is fixed up with VALUE using the information in *FIXP plus
8445 extra information in FLAGS.
8446
8447 This function is called by md_apply_fix to fix up instructions that need
8448 a fix-up described above but does not involve any linker-time relocation. */
8449
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  /* Patch the encoded instruction in BUF according to the operand kind
     the fix-up applies to.  */
  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate of SVC-style and UDF instructions.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
    case AARCH64_OPND_A64C_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2  2  2 21111 111111
		  1  098|7654|3  2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2   221111111111
		  1  098|7654|3 2   109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode through libopcodes since the
	 bitmask encoding is not a simple bit-field insertion.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (cpu_variant, new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_CAPADDR_UIMM9:
    case AARCH64_OPND_A64C_ADDR_SIMM9:
    case AARCH64_OPND_A64C_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (cpu_variant, new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      /* Scaled encoding failed; for scaled load/stores, fall back to
	 the programmer-friendly unscaled (LDUR/STUR) form.  */
      else if ((new_inst->opcode->iclass == ldst_pos
		|| new_inst->opcode->iclass == ldst_altbase)
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8618
8619 /* Apply a fixup (fixP) to segment data, once it has been determined
8620 by our caller that we have all the info we need to fix it up.
8621
8622 Parameter valP is the pointer to the value of the bits. */
8623
8624 void
8625 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
8626 {
8627 offsetT value = *valP;
8628 uint32_t insn;
8629 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
8630 int scale;
8631 unsigned flags = fixP->fx_addnumber;
8632 /* We check alignment for relocations of this kind. These relocations could
8633 be applied on a C64 STT_FUNC symbol and hence may have the LSB set on
8634 `*valP`, their AARCH64 counterparts can not be applied on such symbols and
8635 hence should never have the LSB set on their value. */
8636 valueT alignment_mask = (fixP->fx_r_type == BFD_RELOC_MORELLO_BRANCH19
8637 || fixP->fx_r_type == BFD_RELOC_MORELLO_TSTBR14
8638 || fixP->fx_r_type == BFD_RELOC_MORELLO_CALL26
8639 || fixP->fx_r_type == BFD_RELOC_MORELLO_JUMP26)
8640 ? 2 : 3;
8641
8642 DEBUG_TRACE ("\n\n");
8643 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
8644 DEBUG_TRACE ("Enter md_apply_fix");
8645
8646 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
8647
8648 /* Note whether this will delete the relocation. */
8649
8650 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
8651 fixP->fx_done = 1;
8652
8653 /* Process the relocations. */
8654 switch (fixP->fx_r_type)
8655 {
8656 case BFD_RELOC_NONE:
8657 /* This will need to go in the object file. */
8658 fixP->fx_done = 0;
8659 break;
8660
8661 case BFD_RELOC_8:
8662 case BFD_RELOC_8_PCREL:
8663 if (fixP->fx_done || !seg->use_rela_p)
8664 md_number_to_chars (buf, value, 1);
8665 break;
8666
8667 case BFD_RELOC_16:
8668 case BFD_RELOC_16_PCREL:
8669 if (fixP->fx_done || !seg->use_rela_p)
8670 md_number_to_chars (buf, value, 2);
8671 break;
8672
8673 case BFD_RELOC_32:
8674 case BFD_RELOC_32_PCREL:
8675 if (fixP->fx_done || !seg->use_rela_p)
8676 md_number_to_chars (buf, value, 4);
8677 break;
8678
8679 case BFD_RELOC_64:
8680 case BFD_RELOC_64_PCREL:
8681 if (fixP->fx_done || !seg->use_rela_p)
8682 md_number_to_chars (buf, value, 8);
8683 break;
8684
8685 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
8686 /* We claim that these fixups have been processed here, even if
8687 in fact we generate an error because we do not have a reloc
8688 for them, so tc_gen_reloc() will reject them. */
8689 fixP->fx_done = 1;
8690 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
8691 {
8692 as_bad_where (fixP->fx_file, fixP->fx_line,
8693 _("undefined symbol %s used as an immediate value"),
8694 S_GET_NAME (fixP->fx_addsy));
8695 goto apply_fix_return;
8696 }
8697 fix_insn (fixP, flags, value);
8698 break;
8699
8700 case BFD_RELOC_MORELLO_LD_LO17_PCREL:
8701 if (fixP->fx_done || !seg->use_rela_p)
8702 {
8703 /* The LDR-immediate that uses LO17 aligns the address down to
8704 16-byte boundary to get the final address of the capability.
8705 Since the fixed up immediate also needs to be 16-byte aligned,
8706 align it up to the 16-byte boundary so that the downward alignment
8707 of the load literal instruction gets us the correct address. */
8708 value = (value + 0xf) & ~(offsetT) 0xf;
8709
8710 if (signed_overflow (value, 21))
8711 as_bad_where (fixP->fx_file, fixP->fx_line,
8712 _("pcc-relative load offset out of range"));
8713 insn = get_aarch64_insn (buf);
8714 insn |= encode_ld_lit_ofs_17 (value >> 4);
8715 put_aarch64_insn (buf, insn);
8716 }
8717 break;
8718
8719 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
8720 if (fixP->fx_done || !seg->use_rela_p)
8721 {
8722 if (value & 3)
8723 as_bad_where (fixP->fx_file, fixP->fx_line,
8724 _("pc-relative load offset not word aligned"));
8725 if (signed_overflow (value, 21))
8726 as_bad_where (fixP->fx_file, fixP->fx_line,
8727 _("pc-relative load offset out of range"));
8728 insn = get_aarch64_insn (buf);
8729 insn |= encode_ld_lit_ofs_19 (value >> 2);
8730 put_aarch64_insn (buf, insn);
8731 }
8732 break;
8733
8734 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
8735 if (fixP->fx_done || !seg->use_rela_p)
8736 {
8737 if (signed_overflow (value, 21))
8738 as_bad_where (fixP->fx_file, fixP->fx_line,
8739 _("pc-relative address offset out of range"));
8740 insn = get_aarch64_insn (buf);
8741 insn |= encode_adr_imm (value);
8742 put_aarch64_insn (buf, insn);
8743 }
8744 break;
8745
8746 case BFD_RELOC_AARCH64_BRANCH19:
8747 case BFD_RELOC_MORELLO_BRANCH19:
8748 if (fixP->fx_done || !seg->use_rela_p)
8749 {
8750 if (value & alignment_mask)
8751 as_bad_where (fixP->fx_file, fixP->fx_line,
8752 _("conditional branch target not word aligned"));
8753 if (signed_overflow (value, 21))
8754 as_bad_where (fixP->fx_file, fixP->fx_line,
8755 _("conditional branch out of range"));
8756 insn = get_aarch64_insn (buf);
8757 insn |= encode_cond_branch_ofs_19 (value >> 2);
8758 put_aarch64_insn (buf, insn);
8759 }
8760 break;
8761
8762 case BFD_RELOC_MORELLO_TSTBR14:
8763 case BFD_RELOC_AARCH64_TSTBR14:
8764 if (fixP->fx_done || !seg->use_rela_p)
8765 {
8766 if (value & alignment_mask)
8767 as_bad_where (fixP->fx_file, fixP->fx_line,
8768 _("conditional branch target not word aligned"));
8769 if (signed_overflow (value, 16))
8770 as_bad_where (fixP->fx_file, fixP->fx_line,
8771 _("conditional branch out of range"));
8772 insn = get_aarch64_insn (buf);
8773 insn |= encode_tst_branch_ofs_14 (value >> 2);
8774 put_aarch64_insn (buf, insn);
8775 }
8776 break;
8777
8778 case BFD_RELOC_MORELLO_CALL26:
8779 case BFD_RELOC_MORELLO_JUMP26:
8780 case BFD_RELOC_AARCH64_CALL26:
8781 case BFD_RELOC_AARCH64_JUMP26:
8782 if (fixP->fx_done || !seg->use_rela_p)
8783 {
8784 if (value & alignment_mask)
8785 as_bad_where (fixP->fx_file, fixP->fx_line,
8786 _("branch target not word aligned"));
8787 if (signed_overflow (value, 28))
8788 as_bad_where (fixP->fx_file, fixP->fx_line,
8789 _("branch out of range"));
8790 insn = get_aarch64_insn (buf);
8791 insn |= encode_branch_ofs_26 (value >> 2);
8792 put_aarch64_insn (buf, insn);
8793 }
8794 break;
8795
8796 case BFD_RELOC_MORELLO_MOVW_SIZE_G0:
8797 case BFD_RELOC_MORELLO_MOVW_SIZE_G0_NC:
8798 case BFD_RELOC_AARCH64_MOVW_G0:
8799 case BFD_RELOC_AARCH64_MOVW_G0_NC:
8800 case BFD_RELOC_AARCH64_MOVW_G0_S:
8801 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
8802 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8803 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
8804 scale = 0;
8805 goto movw_common;
8806 case BFD_RELOC_MORELLO_MOVW_SIZE_G1:
8807 case BFD_RELOC_MORELLO_MOVW_SIZE_G1_NC:
8808 case BFD_RELOC_AARCH64_MOVW_G1:
8809 case BFD_RELOC_AARCH64_MOVW_G1_NC:
8810 case BFD_RELOC_AARCH64_MOVW_G1_S:
8811 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8812 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8813 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
8814 scale = 16;
8815 goto movw_common;
8816 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8817 scale = 0;
8818 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8819 /* Should always be exported to object file, see
8820 aarch64_force_relocation(). */
8821 gas_assert (!fixP->fx_done);
8822 gas_assert (seg->use_rela_p);
8823 goto movw_common;
8824 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8825 scale = 16;
8826 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8827 /* Should always be exported to object file, see
8828 aarch64_force_relocation(). */
8829 gas_assert (!fixP->fx_done);
8830 gas_assert (seg->use_rela_p);
8831 goto movw_common;
8832 case BFD_RELOC_MORELLO_MOVW_SIZE_G2:
8833 case BFD_RELOC_MORELLO_MOVW_SIZE_G2_NC:
8834 case BFD_RELOC_AARCH64_MOVW_G2:
8835 case BFD_RELOC_AARCH64_MOVW_G2_NC:
8836 case BFD_RELOC_AARCH64_MOVW_G2_S:
8837 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8838 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
8839 scale = 32;
8840 goto movw_common;
8841 case BFD_RELOC_MORELLO_MOVW_SIZE_G3:
8842 case BFD_RELOC_AARCH64_MOVW_G3:
8843 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
8844 scale = 48;
8845 movw_common:
8846 if (fixP->fx_done || !seg->use_rela_p)
8847 {
8848 insn = get_aarch64_insn (buf);
8849
8850 if (!fixP->fx_done)
8851 {
8852 /* REL signed addend must fit in 16 bits */
8853 if (signed_overflow (value, 16))
8854 as_bad_where (fixP->fx_file, fixP->fx_line,
8855 _("offset out of range"));
8856 }
8857 else
8858 {
8859 /* Check for overflow and scale. */
8860 switch (fixP->fx_r_type)
8861 {
8862 case BFD_RELOC_MORELLO_MOVW_SIZE_G0:
8863 case BFD_RELOC_MORELLO_MOVW_SIZE_G1:
8864 case BFD_RELOC_MORELLO_MOVW_SIZE_G2:
8865 case BFD_RELOC_MORELLO_MOVW_SIZE_G3:
8866 case BFD_RELOC_AARCH64_MOVW_G0:
8867 case BFD_RELOC_AARCH64_MOVW_G1:
8868 case BFD_RELOC_AARCH64_MOVW_G2:
8869 case BFD_RELOC_AARCH64_MOVW_G3:
8870 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8871 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8872 if (unsigned_overflow (value, scale + 16))
8873 as_bad_where (fixP->fx_file, fixP->fx_line,
8874 _("unsigned value out of range"));
8875 break;
8876 case BFD_RELOC_AARCH64_MOVW_G0_S:
8877 case BFD_RELOC_AARCH64_MOVW_G1_S:
8878 case BFD_RELOC_AARCH64_MOVW_G2_S:
8879 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8880 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8881 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8882 /* NOTE: We can only come here with movz or movn. */
8883 if (signed_overflow (value, scale + 16))
8884 as_bad_where (fixP->fx_file, fixP->fx_line,
8885 _("signed value out of range"));
8886 if (value < 0)
8887 {
8888 /* Force use of MOVN. */
8889 value = ~value;
8890 insn = reencode_movzn_to_movn (insn);
8891 }
8892 else
8893 {
8894 /* Force use of MOVZ. */
8895 insn = reencode_movzn_to_movz (insn);
8896 }
8897 break;
8898 default:
8899 /* Unchecked relocations. */
8900 break;
8901 }
8902 value >>= scale;
8903 }
8904
8905 /* Insert value into MOVN/MOVZ/MOVK instruction. */
8906 insn |= encode_movw_imm (value & 0xffff);
8907
8908 put_aarch64_insn (buf, insn);
8909 }
8910 break;
8911
8912 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8913 fixP->fx_r_type = (ilp32_p
8914 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
8915 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
8916 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8917 /* Should always be exported to object file, see
8918 aarch64_force_relocation(). */
8919 gas_assert (!fixP->fx_done);
8920 gas_assert (seg->use_rela_p);
8921 break;
8922
8923 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8924 if (fixP->tc_fix_data.c64)
8925 fixP->fx_r_type = BFD_RELOC_MORELLO_TLSDESC_LD128_LO12;
8926 else if (ilp32_p)
8927 fixP->fx_r_type = BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC;
8928 else
8929 fixP->fx_r_type = BFD_RELOC_AARCH64_TLSDESC_LD64_LO12;
8930 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8931 /* Should always be exported to object file, see
8932 aarch64_force_relocation(). */
8933 gas_assert (!fixP->fx_done);
8934 gas_assert (seg->use_rela_p);
8935 break;
8936
8937 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8938 case BFD_RELOC_MORELLO_TLSDESC_ADR_PAGE20:
8939 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8940 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8941 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8942 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8943 case BFD_RELOC_MORELLO_TLSDESC_LD128_LO12:
8944 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8945 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8946 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8947 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8948 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8949 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8950 case BFD_RELOC_MORELLO_TLSIE_ADR_GOTTPREL_PAGE20:
8951 case BFD_RELOC_MORELLO_TLSIE_ADD_LO12:
8952 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8953 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8954 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8955 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8956 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8957 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8958 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8959 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8960 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8961 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8962 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8963 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8964 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8965 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8966 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8967 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8968 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8969 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8970 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8971 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8972 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8973 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8974 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8975 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8976 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8977 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8978 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8979 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8980 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8981 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8982 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8983 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8984 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8985 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8986 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8987 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8988 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8989 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8990 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8991 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8992 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8993 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8994 /* Should always be exported to object file, see
8995 aarch64_force_relocation(). */
8996 gas_assert (!fixP->fx_done);
8997 gas_assert (seg->use_rela_p);
8998 break;
8999
9000 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
9001 /* Should always be exported to object file, see
9002 aarch64_force_relocation(). */
9003 if (fixP->tc_fix_data.c64)
9004 fixP->fx_r_type = BFD_RELOC_MORELLO_LD128_GOT_LO12_NC;
9005 else if (ilp32_p)
9006 fixP->fx_r_type = BFD_RELOC_AARCH64_LD32_GOT_LO12_NC;
9007 else
9008 fixP->fx_r_type = BFD_RELOC_AARCH64_LD64_GOT_LO12_NC;
9009 gas_assert (!fixP->fx_done);
9010 gas_assert (seg->use_rela_p);
9011 break;
9012
9013 case BFD_RELOC_AARCH64_ADD_LO12:
9014 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
9015 case BFD_RELOC_MORELLO_ADR_GOT_PAGE:
9016 case BFD_RELOC_MORELLO_ADR_HI20_NC_PCREL:
9017 case BFD_RELOC_MORELLO_ADR_HI20_PCREL:
9018 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
9019 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
9020 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
9021 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
9022 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
9023 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
9024 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
9025 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
9026 case BFD_RELOC_AARCH64_LDST128_LO12:
9027 case BFD_RELOC_AARCH64_LDST16_LO12:
9028 case BFD_RELOC_AARCH64_LDST32_LO12:
9029 case BFD_RELOC_AARCH64_LDST64_LO12:
9030 case BFD_RELOC_AARCH64_LDST8_LO12:
9031 case BFD_RELOC_MORELLO_LD128_GOT_LO12_NC:
9032 /* Should always be exported to object file, see
9033 aarch64_force_relocation(). */
9034 gas_assert (!fixP->fx_done);
9035 gas_assert (seg->use_rela_p);
9036 break;
9037
9038 case BFD_RELOC_AARCH64_TLSDESC_ADD:
9039 case BFD_RELOC_AARCH64_TLSDESC_CALL:
9040 case BFD_RELOC_MORELLO_TLSDESC_CALL:
9041 case BFD_RELOC_AARCH64_TLSDESC_LDR:
9042 case BFD_RELOC_MORELLO_CAPINIT:
9043 break;
9044
9045 case BFD_RELOC_UNUSED:
9046 /* An error will already have been reported. */
9047 break;
9048
9049 default:
9050 as_bad_where (fixP->fx_file, fixP->fx_line,
9051 _("unexpected %s fixup"),
9052 bfd_get_reloc_code_name (fixP->fx_r_type));
9053 break;
9054 }
9055
9056 apply_fix_return:
9057 /* Free the allocated the struct aarch64_inst.
9058 N.B. currently there are very limited number of fix-up types actually use
9059 this field, so the impact on the performance should be minimal . */
9060 free (fixP->tc_fix_data.inst);
9061
9062 return;
9063 }
9064
9065 /* Translate internal representation of relocation info to BFD target
9066 format. */
9067
9068 arelent *
9069 tc_gen_reloc (asection * section, fixS * fixp)
9070 {
9071 arelent *reloc;
9072 bfd_reloc_code_real_type code;
9073
9074 reloc = XNEW (arelent);
9075
9076 reloc->sym_ptr_ptr = XNEW (asymbol *);
9077 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9078 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9079
9080 if (fixp->fx_pcrel)
9081 {
9082 if (section->use_rela_p)
9083 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9084 else
9085 fixp->fx_offset = reloc->address;
9086 }
9087 reloc->addend = fixp->fx_offset;
9088
9089 code = fixp->fx_r_type;
9090 switch (code)
9091 {
9092 case BFD_RELOC_16:
9093 if (fixp->fx_pcrel)
9094 code = BFD_RELOC_16_PCREL;
9095 break;
9096
9097 case BFD_RELOC_32:
9098 if (fixp->fx_pcrel)
9099 code = BFD_RELOC_32_PCREL;
9100 break;
9101
9102 case BFD_RELOC_64:
9103 if (fixp->fx_pcrel)
9104 code = BFD_RELOC_64_PCREL;
9105 break;
9106
9107 default:
9108 break;
9109 }
9110
9111 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9112 if (reloc->howto == NULL)
9113 {
9114 as_bad_where (fixp->fx_file, fixp->fx_line,
9115 _
9116 ("cannot represent %s relocation in this object file format"),
9117 bfd_get_reloc_code_name (code));
9118 return NULL;
9119 }
9120
9121 return reloc;
9122 }
9123
9124 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9125
9126 void
9127 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9128 {
9129 bfd_reloc_code_real_type type;
9130 int pcrel = 0;
9131
9132 /* Pick a reloc.
9133 FIXME: @@ Should look at CPU word size. */
9134 switch (size)
9135 {
9136 case 1:
9137 type = BFD_RELOC_8;
9138 break;
9139 case 2:
9140 type = BFD_RELOC_16;
9141 break;
9142 case 4:
9143 type = BFD_RELOC_32;
9144 break;
9145 case 8:
9146 type = BFD_RELOC_64;
9147 break;
9148 default:
9149 as_bad (_("cannot do %u-byte relocation"), size);
9150 type = BFD_RELOC_UNUSED;
9151 break;
9152 }
9153
9154 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9155 }
9156
/* Target hook: decide whether the fix-up FIXP must be kept as a
   relocation for the linker rather than resolved at assembly time.
   Returns nonzero to force the relocation to be emitted.  */

int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_MORELLO_ADR_GOT_PAGE:
    case BFD_RELOC_MORELLO_ADR_HI20_NC_PCREL:
    case BFD_RELOC_MORELLO_ADR_HI20_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_MORELLO_LD128_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_MORELLO_TLSDESC_ADR_PAGE20:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_MORELLO_TLSDESC_LD128_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_MORELLO_TLSIE_ADR_GOTTPREL_PAGE20:
    case BFD_RELOC_MORELLO_TLSIE_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
    case BFD_RELOC_MORELLO_BRANCH19:
    case BFD_RELOC_MORELLO_TSTBR14:
    case BFD_RELOC_AARCH64_BRANCH19:
    case BFD_RELOC_AARCH64_TSTBR14:
    case BFD_RELOC_MORELLO_CALL26:
    case BFD_RELOC_MORELLO_JUMP26:
    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      gas_assert (fixp->fx_addsy != NULL);

      /* A jump/call destination will get adjusted to section+offset only
	 if both caller and callee are of the same type.  */
      if (symbol_section_p (fixp->fx_addsy))
	break;

      /* Force a relocation when the branch source and the target symbol
	 differ in A64/C64 state, so the state change is visible to the
	 linker.  */
      if ((fixp->tc_fix_data.c64
	   && !AARCH64_IS_C64 (fixp->fx_addsy))
	  || (!fixp->tc_fix_data.c64
	      && AARCH64_IS_C64 (fixp->fx_addsy)))
	return 1;

      break;

    default:
      break;
    }

  return generic_force_reloc (fixp);
}
9284
9285 #ifdef OBJ_ELF
9286
9287 /* Implement md_after_parse_args. This is the earliest time we need to decide
9288 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9289
9290 void
9291 aarch64_after_parse_args (void)
9292 {
9293 if (aarch64_abi != AARCH64_ABI_NONE)
9294 return;
9295
9296 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9297 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9298 aarch64_abi = AARCH64_ABI_ILP32;
9299 else
9300 aarch64_abi = AARCH64_ABI_LP64;
9301 }
9302
9303 const char *
9304 elf64_aarch64_target_format (void)
9305 {
9306 #ifdef TE_CLOUDABI
9307 /* FIXME: What to do for ilp32_p ? */
9308 if (target_big_endian)
9309 return "elf64-bigaarch64-cloudabi";
9310 else
9311 return "elf64-littleaarch64-cloudabi";
9312 #else
9313 if (target_big_endian)
9314 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9315 else
9316 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9317 #endif
9318 }
9319
/* Per-symbol hook at symbol write-out time: delegate entirely to the
   generic ELF symbol frobbing code.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9325 #endif
9326
9327 /* MD interface: Finalization. */
9328
9329 /* A good place to do this, although this was probably not intended
9330 for this kind of use. We need to dump the literal pool before
9331 references are made to a null symbol pointer. */
9332
9333 void
9334 aarch64_cleanup (void)
9335 {
9336 literal_pool *pool;
9337
9338 for (pool = list_of_pools; pool; pool = pool->next)
9339 {
9340 /* Put it at the end of the relevant section. */
9341 subseg_set (pool->section, pool->sub_section);
9342 s_ltorg (0);
9343 }
9344 }
9345
9346 #ifdef OBJ_ELF
9347 /* Remove any excess mapping symbols generated for alignment frags in
9348 SEC. We may have created a mapping symbol before a zero byte
9349 alignment; remove it if there's a mapping symbol after the
9350 alignment. */
/* Walk every frag of SEC and discard mapping symbols made redundant by
   zero-size alignment frags (see the comment above this function).
   Called via bfd_map_over_sections; ABFD and DUMMY are unused.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the following frag(s);
	 scan forward to decide whether it is still needed.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9410 #endif
9411
9412 /* Avoid relocations from using section symbols in some cases. */
/* Return TRUE if the symbol referenced by FIXP may be replaced with a
   section symbol plus offset; FALSE when the original symbol must be
   kept in the relocation.  */
bfd_boolean
aarch64_fix_adjustable (struct fix *fixP)
{
  switch (fixP->fx_r_type)
    {
      /* The AArch64 GNU bfd linker can not handle 'symbol + offset' entries in the
	 GOT (it internally uses a symbol to reference a GOT slot).  Hence we can't
	 emit any "section symbol + offset" relocations for the GOT.  */
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      return FALSE;

      /* We need size information of the target symbols to initialise
	 capabilities.  */
    case BFD_RELOC_MORELLO_CAPINIT:
    case BFD_RELOC_MORELLO_ADR_GOT_PAGE:
    case BFD_RELOC_MORELLO_LD128_GOT_LO12_NC:
      return FALSE;

      /* We need to retain symbol information when jumping between A64 and C64
	 states or between two C64 functions.  In the C64 -> C64 situation it's
	 really only a corner case that breaks when symbols get replaced with
	 section symbols; this is when the jump distance is longer than what a
	 branch instruction can handle and we want to branch through a stub.
	 In such a case, the linker needs to know the symbol types of the
	 source and the destination and section symbols are an unreliable
	 source of this information.  */
    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_BRANCH19:
    case BFD_RELOC_AARCH64_TSTBR14:
    case BFD_RELOC_AARCH64_JUMP26:
    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_MORELLO_BRANCH19:
    case BFD_RELOC_MORELLO_TSTBR14:
    case BFD_RELOC_MORELLO_JUMP26:
    case BFD_RELOC_MORELLO_CALL26:
      if (fixP->tc_fix_data.c64 || AARCH64_IS_C64 (fixP->fx_addsy))
	return FALSE;
      break;
    default:
      break;
    }

  return TRUE;
}
9474
9475 /* Adjust the symbol table. */
9476
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  symbolS *sym;

  /* Mark every C64 function symbol (other than the AArch64 special
     symbols) as a C64 branch target in its ELF target-internal bits.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      asymbol *bfdsym = symbol_get_bfdsym (sym);
      elf_symbol_type *elf_sym;

      if (!AARCH64_IS_C64 (sym) || (bfdsym->flags & BSF_FUNCTION) == 0)
	continue;

      elf_sym = elf_symbol (bfdsym);
      if (!bfd_is_aarch64_special_symbol_name
	  (elf_sym->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY))
	elf_sym->internal_elf_sym.st_target_internal = ST_BRANCH_TO_C64;
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9504
/* Insert KEY -> VALUE into TABLE via str_hash_insert with its REPLACE
   argument fixed at 0.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9510
/* Insert a system-register entry, asserting first that KEY fits within
   AARCH64_MAX_SYSREG_NAME_LEN.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9517
9518 static void
9519 fill_instruction_hash_table (void)
9520 {
9521 aarch64_opcode *opcode = aarch64_opcode_table;
9522
9523 while (opcode->name != NULL)
9524 {
9525 templates *templ, *new_templ;
9526 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9527
9528 new_templ = XNEW (templates);
9529 new_templ->opcode = opcode;
9530 new_templ->next = NULL;
9531
9532 if (!templ)
9533 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9534 else
9535 {
9536 new_templ->next = templ->next;
9537 templ->next = new_templ;
9538 }
9539 ++opcode;
9540 }
9541 }
9542
9543 static inline void
9544 convert_to_upper (char *dst, const char *src, size_t num)
9545 {
9546 unsigned int i;
9547 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9548 *dst = TOUPPER (*src);
9549 *dst = '\0';
9550 }
9551
/* Assume STR points to a lower-case string; allocate, convert and return
   the corresponding upper-case string.  */
9554 static inline const char*
9555 get_upper_str (const char *str)
9556 {
9557 char *ret;
9558 size_t len = strlen (str);
9559 ret = XNEWVEC (char, len + 1);
9560 convert_to_upper (ret, str, len);
9561 return ret;
9562 }
9563
9564 /* MD interface: Initialization. */
9565
9566 void
9567 md_begin (void)
9568 {
9569 unsigned mach;
9570 unsigned int i;
9571
9572 aarch64_ops_hsh = str_htab_create ();
9573 aarch64_cond_hsh = str_htab_create ();
9574 aarch64_shift_hsh = str_htab_create ();
9575 aarch64_sys_regs_hsh = str_htab_create ();
9576 aarch64_pstatefield_hsh = str_htab_create ();
9577 aarch64_sys_regs_ic_hsh = str_htab_create ();
9578 aarch64_sys_regs_dc_hsh = str_htab_create ();
9579 aarch64_sys_regs_at_hsh = str_htab_create ();
9580 aarch64_sys_regs_tlbi_hsh = str_htab_create ();
9581 aarch64_sys_regs_sr_hsh = str_htab_create ();
9582 aarch64_reg_hsh = str_htab_create ();
9583 aarch64_barrier_opt_hsh = str_htab_create ();
9584 aarch64_nzcv_hsh = str_htab_create ();
9585 aarch64_pldop_hsh = str_htab_create ();
9586 aarch64_hint_opt_hsh = str_htab_create ();
9587
9588 fill_instruction_hash_table ();
9589
9590 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
9591 sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
9592 (void *) (aarch64_sys_regs + i));
9593
9594 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
9595 sysreg_hash_insert (aarch64_pstatefield_hsh,
9596 aarch64_pstatefields[i].name,
9597 (void *) (aarch64_pstatefields + i));
9598
9599 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
9600 sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
9601 aarch64_sys_regs_ic[i].name,
9602 (void *) (aarch64_sys_regs_ic + i));
9603
9604 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
9605 sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
9606 aarch64_sys_regs_dc[i].name,
9607 (void *) (aarch64_sys_regs_dc + i));
9608
9609 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
9610 sysreg_hash_insert (aarch64_sys_regs_at_hsh,
9611 aarch64_sys_regs_at[i].name,
9612 (void *) (aarch64_sys_regs_at + i));
9613
9614 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
9615 sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
9616 aarch64_sys_regs_tlbi[i].name,
9617 (void *) (aarch64_sys_regs_tlbi + i));
9618
9619 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
9620 sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
9621 aarch64_sys_regs_sr[i].name,
9622 (void *) (aarch64_sys_regs_sr + i));
9623
9624 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
9625 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
9626 (void *) (reg_names + i));
9627
9628 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
9629 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
9630 (void *) (nzcv_names + i));
9631
9632 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
9633 {
9634 const char *name = aarch64_operand_modifiers[i].name;
9635 checked_hash_insert (aarch64_shift_hsh, name,
9636 (void *) (aarch64_operand_modifiers + i));
9637 /* Also hash the name in the upper case. */
9638 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
9639 (void *) (aarch64_operand_modifiers + i));
9640 }
9641
9642 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
9643 {
9644 unsigned int j;
9645 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
9646 the same condition code. */
9647 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
9648 {
9649 const char *name = aarch64_conds[i].names[j];
9650 if (name == NULL)
9651 break;
9652 checked_hash_insert (aarch64_cond_hsh, name,
9653 (void *) (aarch64_conds + i));
9654 /* Also hash the name in the upper case. */
9655 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
9656 (void *) (aarch64_conds + i));
9657 }
9658 }
9659
9660 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
9661 {
9662 const char *name = aarch64_barrier_options[i].name;
9663 /* Skip xx00 - the unallocated values of option. */
9664 if ((i & 0x3) == 0)
9665 continue;
9666 checked_hash_insert (aarch64_barrier_opt_hsh, name,
9667 (void *) (aarch64_barrier_options + i));
9668 /* Also hash the name in the upper case. */
9669 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
9670 (void *) (aarch64_barrier_options + i));
9671 }
9672
9673 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
9674 {
9675 const char* name = aarch64_prfops[i].name;
9676 /* Skip the unallocated hint encodings. */
9677 if (name == NULL)
9678 continue;
9679 checked_hash_insert (aarch64_pldop_hsh, name,
9680 (void *) (aarch64_prfops + i));
9681 /* Also hash the name in the upper case. */
9682 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
9683 (void *) (aarch64_prfops + i));
9684 }
9685
9686 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
9687 {
9688 const char* name = aarch64_hint_options[i].name;
9689 const char* upper_name = get_upper_str(name);
9690
9691 checked_hash_insert (aarch64_hint_opt_hsh, name,
9692 (void *) (aarch64_hint_options + i));
9693
9694 /* Also hash the name in the upper case if not the same. */
9695 if (strcmp (name, upper_name) != 0)
9696 checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
9697 (void *) (aarch64_hint_options + i));
9698 }
9699
9700 /* Set the cpu variant based on the command-line options. */
9701 if (!mcpu_cpu_opt)
9702 mcpu_cpu_opt = march_cpu_opt;
9703
9704 if (!mcpu_cpu_opt)
9705 mcpu_cpu_opt = &cpu_default;
9706
9707 cpu_variant = *mcpu_cpu_opt;
9708
9709 /* Record the CPU type. */
9710 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
9711
9712 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
9713
9714 #ifdef OBJ_ELF
9715 if (IS_C64)
9716 bfd_set_private_flags (stdoutput, EF_AARCH64_CHERI_PURECAP);
9717 #endif
9718 }
9719
9720 /* Command line processing. */
9721
/* Single-letter options accepted by this target; "-m<arg>" takes a value
   that is matched against aarch64_opts / aarch64_long_opts below.  */
const char *md_shortopts = "m:";
9723
9724 #ifdef AARCH64_BI_ENDIAN
9725 #define OPTION_EB (OPTION_MD_BASE + 0)
9726 #define OPTION_EL (OPTION_MD_BASE + 1)
9727 #else
9728 #if TARGET_BYTES_BIG_ENDIAN
9729 #define OPTION_EB (OPTION_MD_BASE + 0)
9730 #else
9731 #define OPTION_EL (OPTION_MD_BASE + 1)
9732 #endif
9733 #endif
9734
/* Long options table for getopt_long.  The endianness entries are only
   present when the corresponding OPTION_EB/OPTION_EL macro was defined
   above, which depends on how the target was configured.  */
struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9746
/* Table entry for a simple assembler option that just stores an integer
   into a variable (see aarch64_opts and md_parse_option).  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
9755
/* Simple flag-style options handled by md_parse_option.  Each entry sets
   *var = value when its option string is seen on the command line.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9769
/* Table entry mapping a "-mcpu=" name to the feature set it implies.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
9778
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Each entry is the base architecture plus the
   optional extensions the core implements.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  /* Pseudo-CPU enabling every feature the assembler knows about.  */
  {"all", AARCH64_ANY, NULL},
  /* Armv8.0-A cores.  */
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  /* Armv8.2-A cores.  */
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
		   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
		   "Cortex-A78AE"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
			    "Ares"},
  /* Third-party implementations.  */
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				"Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
			      "Qualcomm Falkor"},
  /* Neoverse cores.  */
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
				   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
				   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
		  "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
			       "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
			       "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
			       "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
			     "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
		 "Cortex-X1"},
  /* Generic baseline when no CPU is specified.  */
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9911
/* Table entry mapping a "-march=" name to its feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};
9917
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  The leading "all" entry is deliberately skipped by
   the .arch directive handler (see s_aarch64_arch).  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8-r", AARCH64_ARCH_V8_R},
  /* Arm Morello (capability) architecture.  */
  {"morello", AARCH64_ARCH_MORELLO},
  {NULL, AARCH64_ARCH_NONE}
};
9933
/* ISA extensions.  Each entry names an extension the user can toggle with
   "+ext" / "+noext"; VALUE is the feature bit(s) it controls and REQUIRE
   the features it depends on.  The dependency closure is computed by
   aarch64_feature_enable_set / aarch64_feature_disable_set below.  */
struct aarch64_option_cpu_value_table
{
  const char *name;			/* Extension name as written by the user.  */
  const aarch64_feature_set value;	/* Feature bits this extension enables.  */
  const aarch64_feature_set require;	/* Feature dependencies.  */
};
9941
/* Per-extension feature bits and their dependencies.  AARCH64_ARCH_NONE
   in the REQUIRE column means the extension stands alone.  Entries with a
   common prefix (e.g. "sve" before "sve2") must keep shorter names first;
   aarch64_parse_features matches with a bounded strncmp and takes the
   first hit.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_ARCH_NONE},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_ARCH_NONE},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  /* Morello capability extensions: "a64c" is the base capability
     instruction set; "c64" (pure-capability code generation) requires
     it.  */
  {"a64c",		AARCH64_FEATURE (AARCH64_FEATURE_A64C, 0),
			AARCH64_ARCH_NONE},
  {"c64",		AARCH64_FEATURE (AARCH64_FEATURE_C64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_A64C, 0)},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
10026
/* Table entry for a long option ("-mabi=", "-mcpu=", "-march=") whose
   value is decoded by a callback; see aarch64_long_opts.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
10034
10035 /* Transitive closure of features depending on set. */
10036 static aarch64_feature_set
10037 aarch64_feature_disable_set (aarch64_feature_set set)
10038 {
10039 const struct aarch64_option_cpu_value_table *opt;
10040 aarch64_feature_set prev = 0;
10041
10042 while (prev != set) {
10043 prev = set;
10044 for (opt = aarch64_features; opt->name != NULL; opt++)
10045 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10046 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10047 }
10048 return set;
10049 }
10050
10051 /* Transitive closure of dependencies of set. */
10052 static aarch64_feature_set
10053 aarch64_feature_enable_set (aarch64_feature_set set)
10054 {
10055 const struct aarch64_option_cpu_value_table *opt;
10056 aarch64_feature_set prev = 0;
10057
10058 while (prev != set) {
10059 prev = set;
10060 for (opt = aarch64_features; opt->name != NULL; opt++)
10061 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10062 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10063 }
10064 return set;
10065 }
10066
10067 static int
10068 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10069 bfd_boolean ext_only)
10070 {
10071 /* We insist on extensions being added before being removed. We achieve
10072 this by using the ADDING_VALUE variable to indicate whether we are
10073 adding an extension (1) or removing it (0) and only allowing it to
10074 change in the order -1 -> 1 -> 0. */
10075 int adding_value = -1;
10076 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10077
10078 /* Copy the feature set, so that we can modify it. */
10079 *ext_set = **opt_p;
10080 *opt_p = ext_set;
10081
10082 while (str != NULL && *str != 0)
10083 {
10084 const struct aarch64_option_cpu_value_table *opt;
10085 const char *ext = NULL;
10086 int optlen;
10087
10088 if (!ext_only)
10089 {
10090 if (*str != '+')
10091 {
10092 as_bad (_("invalid architectural extension"));
10093 return 0;
10094 }
10095
10096 ext = strchr (++str, '+');
10097 }
10098
10099 if (ext != NULL)
10100 optlen = ext - str;
10101 else
10102 optlen = strlen (str);
10103
10104 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
10105 {
10106 if (adding_value != 0)
10107 adding_value = 0;
10108 optlen -= 2;
10109 str += 2;
10110 }
10111 else if (optlen > 0)
10112 {
10113 if (adding_value == -1)
10114 adding_value = 1;
10115 else if (adding_value != 1)
10116 {
10117 as_bad (_("must specify extensions to add before specifying "
10118 "those to remove"));
10119 return FALSE;
10120 }
10121 }
10122
10123 if (optlen == 0)
10124 {
10125 as_bad (_("missing architectural extension"));
10126 return 0;
10127 }
10128
10129 gas_assert (adding_value != -1);
10130
10131 for (opt = aarch64_features; opt->name != NULL; opt++)
10132 if (strncmp (opt->name, str, optlen) == 0)
10133 {
10134 aarch64_feature_set set;
10135
10136 /* Add or remove the extension. */
10137 if (adding_value)
10138 {
10139 set = aarch64_feature_enable_set (opt->value);
10140 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10141 }
10142 else
10143 {
10144 set = aarch64_feature_disable_set (opt->value);
10145 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10146 }
10147 break;
10148 }
10149
10150 if (opt->name == NULL)
10151 {
10152 as_bad (_("unknown architectural extension `%s'"), str);
10153 return 0;
10154 }
10155
10156 str = ext;
10157 };
10158
10159 return 1;
10160 }
10161
10162 static int
10163 aarch64_parse_cpu (const char *str)
10164 {
10165 const struct aarch64_cpu_option_table *opt;
10166 const char *ext = strchr (str, '+');
10167 size_t optlen;
10168
10169 if (ext != NULL)
10170 optlen = ext - str;
10171 else
10172 optlen = strlen (str);
10173
10174 if (optlen == 0)
10175 {
10176 as_bad (_("missing cpu name `%s'"), str);
10177 return 0;
10178 }
10179
10180 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10181 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10182 {
10183 mcpu_cpu_opt = &opt->value;
10184 if (ext != NULL)
10185 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
10186
10187 return 1;
10188 }
10189
10190 as_bad (_("unknown cpu `%s'"), str);
10191 return 0;
10192 }
10193
10194 static int
10195 aarch64_parse_arch (const char *str)
10196 {
10197 const struct aarch64_arch_option_table *opt;
10198 const char *ext = strchr (str, '+');
10199 size_t optlen;
10200
10201 if (ext != NULL)
10202 optlen = ext - str;
10203 else
10204 optlen = strlen (str);
10205
10206 if (optlen == 0)
10207 {
10208 as_bad (_("missing architecture name `%s'"), str);
10209 return 0;
10210 }
10211
10212 for (opt = aarch64_archs; opt->name != NULL; opt++)
10213 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10214 {
10215 march_cpu_opt = &opt->value;
10216 if (ext != NULL)
10217 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
10218
10219 return 1;
10220 }
10221
10222 as_bad (_("unknown architecture `%s'\n"), str);
10223 return 0;
10224 }
10225
/* ABIs.  Table entry mapping an "-mabi=" name to its enum value.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};
10232
/* Recognized "-mabi=" values.  Unlike the other tables in this file there
   is no NULL sentinel: aarch64_parse_abi iterates with ARRAY_SIZE.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
  /* Note that these values are accepted since they are valid parameters to
     the -mabi argument for GCC.  However we base no decision on them.  */
  {"purecap", AARCH64_ABI_PURECAP},
  {"hybrid", AARCH64_ABI_HYBRID},
};
10241
10242 static int
10243 aarch64_parse_abi (const char *str)
10244 {
10245 unsigned int i;
10246
10247 if (str[0] == '\0')
10248 {
10249 as_bad (_("missing abi name `%s'"), str);
10250 return 0;
10251 }
10252
10253 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10254 if (strcmp (str, aarch64_abis[i].name) == 0)
10255 {
10256 aarch64_abi = aarch64_abis[i].value;
10257 return 1;
10258 }
10259
10260 as_bad (_("unknown abi `%s'\n"), str);
10261 return 0;
10262 }
10263
/* Long options with values, dispatched by md_parse_option to the decoder
   function in each entry.  The leading option character ('m') doubles as
   the short-option letter; see the arg offset computation there.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10275
/* gas hook: handle target-specific command-line option C with argument
   ARG.  First checks the hard-wired cases (-EB/-EL/-a), then the simple
   flag table aarch64_opts, then the value-carrying table
   aarch64_long_opts.  Returns 1 if the option was consumed, 0 if it is
   unknown.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  /* The table's option string includes the short-option letter
	     (e.g. the 'm' of "mbig-endian"), so compare C against its
	     first character and ARG against the rest.  */
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text after the '=';
		 ARG excludes the leading letter already matched as C,
		 hence the "- 1".  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10343
/* gas hook: print the AArch64-specific option summary to FP for --help.
   Entries with a NULL help string are intentionally undocumented.  */
void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif
}
10370
10371 /* Parse a .cpu directive. */
10372
10373 static void
10374 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10375 {
10376 const struct aarch64_cpu_option_table *opt;
10377 char saved_char;
10378 char *name;
10379 char *ext;
10380 size_t optlen;
10381
10382 name = input_line_pointer;
10383 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10384 input_line_pointer++;
10385 saved_char = *input_line_pointer;
10386 *input_line_pointer = 0;
10387
10388 ext = strchr (name, '+');
10389
10390 if (ext != NULL)
10391 optlen = ext - name;
10392 else
10393 optlen = strlen (name);
10394
10395 /* Skip the first "all" entry. */
10396 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10397 if (strlen (opt->name) == optlen
10398 && strncmp (name, opt->name, optlen) == 0)
10399 {
10400 mcpu_cpu_opt = &opt->value;
10401 if (ext != NULL)
10402 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
10403 return;
10404
10405 cpu_variant = *mcpu_cpu_opt;
10406
10407 *input_line_pointer = saved_char;
10408 demand_empty_rest_of_line ();
10409 return;
10410 }
10411 as_bad (_("unknown cpu `%s'"), name);
10412 *input_line_pointer = saved_char;
10413 ignore_rest_of_line ();
10414 }
10415
10416
10417 /* Parse a .arch directive. */
10418
10419 static void
10420 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10421 {
10422 const struct aarch64_arch_option_table *opt;
10423 char saved_char;
10424 char *name;
10425 char *ext;
10426 size_t optlen;
10427
10428 name = input_line_pointer;
10429 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10430 input_line_pointer++;
10431 saved_char = *input_line_pointer;
10432 *input_line_pointer = 0;
10433
10434 ext = strchr (name, '+');
10435
10436 if (ext != NULL)
10437 optlen = ext - name;
10438 else
10439 optlen = strlen (name);
10440
10441 /* Skip the first "all" entry. */
10442 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10443 if (strlen (opt->name) == optlen
10444 && strncmp (name, opt->name, optlen) == 0)
10445 {
10446 mcpu_cpu_opt = &opt->value;
10447 if (ext != NULL)
10448 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
10449 return;
10450
10451 cpu_variant = *mcpu_cpu_opt;
10452
10453 *input_line_pointer = saved_char;
10454 demand_empty_rest_of_line ();
10455 return;
10456 }
10457
10458 as_bad (_("unknown architecture `%s'\n"), name);
10459 *input_line_pointer = saved_char;
10460 ignore_rest_of_line ();
10461 }
10462
10463 /* Parse a .arch_extension directive. */
10464
10465 static void
10466 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10467 {
10468 char saved_char;
10469 char *ext = input_line_pointer;;
10470
10471 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10472 input_line_pointer++;
10473 saved_char = *input_line_pointer;
10474 *input_line_pointer = 0;
10475
10476 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
10477 return;
10478
10479 cpu_variant = *mcpu_cpu_opt;
10480
10481 *input_line_pointer = saved_char;
10482 demand_empty_rest_of_line ();
10483 }
10484
/* Copy symbol information.  gas hook: propagate the AArch64-specific
   symbol flag word from SRC to DEST (e.g. when one symbol is aliased to
   another).  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10492
10493 #ifdef OBJ_ELF
10494 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10495 This is needed so AArch64 specific st_other values can be independently
10496 specified for an IFUNC resolver (that is called by the dynamic linker)
10497 and the symbol it resolves (aliased to the resolver). In particular,
10498 if a function symbol has special st_other value set via directives,
10499 then attaching an IFUNC resolver to that symbol should not override
10500 the st_other setting. Requiring the directive on the IFUNC resolver
10501 symbol would be unexpected and problematic in C code, where the two
10502 symbols appear as two independent function declarations. */
10503
10504 void
10505 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10506 {
10507 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10508 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10509 if (srcelf->size)
10510 {
10511 if (destelf->size == NULL)
10512 destelf->size = XNEW (expressionS);
10513 *destelf->size = *srcelf->size;
10514 }
10515 else
10516 {
10517 free (destelf->size);
10518 destelf->size = NULL;
10519 }
10520 S_SET_SIZE (dest, S_GET_SIZE (src));
10521 }
10522 #endif