]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
[binutils][aarch64] New SVE_SHLIMM_UNPRED_22 operand.
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2019 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
/* Types of processor to assemble for.  */
#ifndef CPU_DEFAULT
#define CPU_DEFAULT AARCH64_ARCH_V8
#endif

/* String equality test.  */
#define streq(a, b)	  (strcmp (a, b) == 0)

/* Marker for the end of an assembly instruction line.  */
#define END_OF_INSN '\0'

/* Feature set of the processor being assembled for; derived from the
   command-line options once they have all been processed.  */
static aarch64_feature_set cpu_variant;

/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const aarch64_feature_set *mcpu_cpu_opt = NULL;
static const aarch64_feature_set *march_cpu_opt = NULL;

/* Constants for known architecture features.  */
static const aarch64_feature_set cpu_default = CPU_DEFAULT;

/* Currently active instruction sequence.  */
static aarch64_instr_sequence *insn_sequence = NULL;
60
#ifdef OBJ_ELF
/* Pre-defined "_GLOBAL_OFFSET_TABLE_".  */
static symbolS *GOT_symbol;

/* Which ABI to use.  */
enum aarch64_abi_type
{
  AARCH64_ABI_NONE = 0,
  AARCH64_ABI_LP64 = 1,
  AARCH64_ABI_ILP32 = 2
};

#ifndef DEFAULT_ARCH
#define DEFAULT_ARCH "aarch64"
#endif

/* DEFAULT_ARCH is initialized in gas/configure.tgt.  */
static const char *default_arch = DEFAULT_ARCH;

/* AArch64 ABI for the output file.  */
static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;

/* When non-zero, program to a 32-bit model, in which the C data types
   int, long and all pointer types are 32-bit objects (ILP32); or to a
   64-bit model, in which the C int type is 32-bits but the C long type
   and all pointer types are 64-bit objects (LP64).  */
#define ilp32_p		(aarch64_abi == AARCH64_ABI_ILP32)
#endif
89
/* Element types that can appear in a vector register suffix (e.g. the
   ".4s" in "v0.4s") or an SVE predication suffix ("/z", "/m").  */
enum vector_el_type
{
  NT_invtype = -1,
  NT_b,
  NT_h,
  NT_s,
  NT_d,
  NT_q,
  NT_zero,	/* SVE "/z" zeroing predication.  */
  NT_merge	/* SVE "/m" merging predication.  */
};

/* Bits for DEFINED field in vector_type_el.  */
#define NTA_HASTYPE     1
#define NTA_HASINDEX    2
#define NTA_HASVARWIDTH 4

/* Parsed shape and element-index information for a vector register
   operand.  Which fields are meaningful is recorded in DEFINED.  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type.  */
  unsigned char defined;	/* Mask of NTA_* bits that are valid.  */
  unsigned width;		/* Element count; 0 for variable-width (SVE).  */
  int64_t index;		/* Element index, if NTA_HASINDEX is set.  */
};
114
#define FIXUP_F_HAS_EXPLICIT_SHIFT	0x00000001

/* Relocation/fixup information collected while parsing one instruction.  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code.  */
  expressionS exp;			/* Expression the reloc applies to.  */
  int pc_rel;				/* Non-zero if PC-relative.  */
  enum aarch64_opnd opnd;		/* Operand the reloc belongs to.  */
  uint32_t flags;			/* FIXUP_F_* flags.  */
  /* NOTE(review): presumably set when libopcodes must re-encode the
     instruction at fixup time -- confirm against the fixup code.  */
  unsigned need_libopcodes_p : 1;
};
126
/* Representation of the instruction currently being parsed, encoded and
   fixed up.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
  {
    enum aarch64_operand_error_kind kind;
    const char *error;
  } parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};

typedef struct aarch64_instruction aarch64_instruction;

/* The single instruction being assembled; shared by the parsing and
   diagnostic helpers below.  */
static aarch64_instruction inst;

/* Forward declarations; defined later in this file.  */
static bfd_boolean parse_operands (char *, const aarch64_opcode *);
static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);

#ifdef OBJ_ELF
/* Per-segment instruction-sequence state.  */
# define now_instr_sequence seg_info \
		(now_seg)->tc_segment_info_data.insn_sequence
#else
static struct aarch64_instr_sequence now_instr_sequence;
#endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
183 static inline bfd_boolean
184 error_p (void)
185 {
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188
/* Return the message text of the recorded error; NULL when no message was
   supplied (the operand's default description is used instead).  */
static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}
194
/* Return the kind of the recorded error (AARCH64_OPDE_NIL if none).  */
static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}
200
201 static inline void
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
203 {
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
206 }
207
/* Record a recoverable error: parsing may still continue against the
   current instruction template.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
213
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message (a NULL message selects that fallback).  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
221
/* Record a syntax error with message ERROR, unconditionally overwriting
   any previously recorded error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
/* Record a fatal syntax error: no other instruction template should be
   tried for this line.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
240 \f
/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

/* Return value for certain parsers when the parsing fails; those parsers
   return the information of the parsed result, e.g. register number, on
   success.  */
#define PARSE_FAIL -1

/* This is an invalid condition code that means no conditional field is
   present.  */
#define COND_ALWAYS 0x10

/* Mapping from a barrier-option name to its encoding.  */
typedef struct
{
  const char *template;
  unsigned long value;
} asm_barrier_opt;

/* Mapping from an NZCV flag-set name to its encoding.  */
typedef struct
{
  const char *template;
  uint32_t value;
} asm_nzcv;

/* Mapping from a relocation-modifier name to a BFD relocation code.  */
struct reloc_entry
{
  char *name;
  bfd_reloc_code_real_type reloc;
};
270
/* Macros to define the register types and masks for the purpose
   of parsing.  */

/* X-macro listing every register type known to the parser.
   BASIC_REG_TYPE defines a single register class; MULTI_REG_TYPE defines
   a union of classes used only for type checking.  The list is expanded
   twice below: once to build the aarch64_reg_type enumeration and once to
   build the reg_type_masks[] bitmask table.  */
#undef AARCH64_REG_TYPES
#define AARCH64_REG_TYPES	\
  BASIC_REG_TYPE(R_32)	/* w[0-30] */	\
  BASIC_REG_TYPE(R_64)	/* x[0-30] */	\
  BASIC_REG_TYPE(SP_32)	/* wsp     */	\
  BASIC_REG_TYPE(SP_64)	/* sp      */	\
  BASIC_REG_TYPE(Z_32)	/* wzr     */	\
  BASIC_REG_TYPE(Z_64)	/* xzr     */	\
  BASIC_REG_TYPE(FP_B)	/* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */ \
  BASIC_REG_TYPE(FP_H)	/* h[0-31] */	\
  BASIC_REG_TYPE(FP_S)	/* s[0-31] */	\
  BASIC_REG_TYPE(FP_D)	/* d[0-31] */	\
  BASIC_REG_TYPE(FP_Q)	/* q[0-31] */	\
  BASIC_REG_TYPE(VN)	/* v[0-31] */	\
  BASIC_REG_TYPE(ZN)	/* z[0-31] */	\
  BASIC_REG_TYPE(PN)	/* p[0-15] */	\
  /* Typecheck: any 64-bit int reg (inc SP exc XZR).  */		\
  MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64))		\
  /* Typecheck: same, plus SVE registers.  */				\
  MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64)		\
		 | REG_TYPE(ZN))					\
  /* Typecheck: x[0-30], w[0-30] or [xw]zr.  */				\
  MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* Typecheck: same, plus SVE registers.  */				\
  MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64)		\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64)			\
		 | REG_TYPE(ZN))					\
  /* Typecheck: x[0-30], w[0-30] or {w}sp.  */				\
  MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64)			\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64))			\
  /* Typecheck: any int (inc {W}SP inc [WX]ZR).  */			\
  MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64)		\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* Typecheck: any [BHSDQ]P FP.  */					\
  MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
  /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR).  */ \
  MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64)		\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
  /* Typecheck: as above, but also Zn, Pn, and {W}SP.  This should only	\
     be used for SVE instructions, since Zn and Pn are valid symbols	\
     in other contexts.  */						\
  MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64)	\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)	\
		 | REG_TYPE(ZN) | REG_TYPE(PN))				\
  /* Any integer register; used for error messages only.  */		\
  MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64)			\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* Pseudo type to mark the end of the enumerator sequence.  */	\
  BASIC_REG_TYPE(MAX)

/* First expansion: build the REG_TYPE_* enumerators.  */
#undef BASIC_REG_TYPE
#define BASIC_REG_TYPE(T)	REG_TYPE_##T,
#undef MULTI_REG_TYPE
#define MULTI_REG_TYPE(T,V)	BASIC_REG_TYPE(T)

/* Register type enumerators.  */
typedef enum aarch64_reg_type_
{
  /* A list of REG_TYPE_*.  */
  AARCH64_REG_TYPES
} aarch64_reg_type;

/* Second expansion: build one bitmask per register type; a MULTI type's
   mask is the OR of the masks of its component basic types.  */
#undef BASIC_REG_TYPE
#define BASIC_REG_TYPE(T)	1 << REG_TYPE_##T,
#undef REG_TYPE
#define REG_TYPE(T)	(1 << REG_TYPE_##T)
#undef MULTI_REG_TYPE
#define MULTI_REG_TYPE(T,V)	V,

/* Structure for a hash table entry for a register.  */
typedef struct
{
  const char *name;		/* Register name, e.g. "x0".  */
  unsigned char number;		/* Register number within its class.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;
  /* NOTE(review): presumably non-zero for predefined (not .req-defined)
     registers -- confirm against where reg_entry is populated.  */
  unsigned char builtin;
} reg_entry;

/* Values indexed by aarch64_reg_type to assist the type checking.  */
static const unsigned reg_type_masks[] =
{
  AARCH64_REG_TYPES
};

#undef BASIC_REG_TYPE
#undef REG_TYPE
#undef MULTI_REG_TYPE
#undef AARCH64_REG_TYPES
371
372 /* Diagnostics used when we don't get a register of the expected type.
373 Note: this has to synchronized with aarch64_reg_type definitions
374 above. */
375 static const char *
376 get_reg_expected_msg (aarch64_reg_type reg_type)
377 {
378 const char *msg;
379
380 switch (reg_type)
381 {
382 case REG_TYPE_R_32:
383 msg = N_("integer 32-bit register expected");
384 break;
385 case REG_TYPE_R_64:
386 msg = N_("integer 64-bit register expected");
387 break;
388 case REG_TYPE_R_N:
389 msg = N_("integer register expected");
390 break;
391 case REG_TYPE_R64_SP:
392 msg = N_("64-bit integer or SP register expected");
393 break;
394 case REG_TYPE_SVE_BASE:
395 msg = N_("base register expected");
396 break;
397 case REG_TYPE_R_Z:
398 msg = N_("integer or zero register expected");
399 break;
400 case REG_TYPE_SVE_OFFSET:
401 msg = N_("offset register expected");
402 break;
403 case REG_TYPE_R_SP:
404 msg = N_("integer or SP register expected");
405 break;
406 case REG_TYPE_R_Z_SP:
407 msg = N_("integer, zero or SP register expected");
408 break;
409 case REG_TYPE_FP_B:
410 msg = N_("8-bit SIMD scalar register expected");
411 break;
412 case REG_TYPE_FP_H:
413 msg = N_("16-bit SIMD scalar or floating-point half precision "
414 "register expected");
415 break;
416 case REG_TYPE_FP_S:
417 msg = N_("32-bit SIMD scalar or floating-point single precision "
418 "register expected");
419 break;
420 case REG_TYPE_FP_D:
421 msg = N_("64-bit SIMD scalar or floating-point double precision "
422 "register expected");
423 break;
424 case REG_TYPE_FP_Q:
425 msg = N_("128-bit SIMD scalar or floating-point quad precision "
426 "register expected");
427 break;
428 case REG_TYPE_R_Z_BHSDQ_V:
429 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
430 msg = N_("register expected");
431 break;
432 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
433 msg = N_("SIMD scalar or floating-point register expected");
434 break;
435 case REG_TYPE_VN: /* any V reg */
436 msg = N_("vector register expected");
437 break;
438 case REG_TYPE_ZN:
439 msg = N_("SVE vector register expected");
440 break;
441 case REG_TYPE_PN:
442 msg = N_("SVE predicate register expected");
443 break;
444 default:
445 as_fatal (_("invalid register type %d"), reg_type);
446 }
447 return msg;
448 }
449
/* Some well known registers that we refer to directly elsewhere.  */
#define REG_SP	31
#define REG_ZR	31

/* Instructions take 4 bytes in the object file.  */
#define INSN_SIZE	4

/* Name-keyed hash tables used by the parsers; what each table holds is
   indicated by its name.  They are populated during assembler start-up
   (not visible in this excerpt).  */
static struct hash_control *aarch64_ops_hsh;
static struct hash_control *aarch64_cond_hsh;
static struct hash_control *aarch64_shift_hsh;
static struct hash_control *aarch64_sys_regs_hsh;
static struct hash_control *aarch64_pstatefield_hsh;
static struct hash_control *aarch64_sys_regs_ic_hsh;
static struct hash_control *aarch64_sys_regs_dc_hsh;
static struct hash_control *aarch64_sys_regs_at_hsh;
static struct hash_control *aarch64_sys_regs_tlbi_hsh;
static struct hash_control *aarch64_sys_regs_sr_hsh;
static struct hash_control *aarch64_reg_hsh;
static struct hash_control *aarch64_barrier_opt_hsh;
static struct hash_control *aarch64_nzcv_hsh;
static struct hash_control *aarch64_pldop_hsh;
static struct hash_control *aarch64_hint_opt_hsh;
472
/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
	     <insn>  */

static symbolS *last_label_seen;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE	1024
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum
     value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;

typedef struct literal_pool
{
  literal_expression literals[MAX_LITERAL_POOL_SIZE];	/* Pool entries.  */
  unsigned int next_free_entry;	/* Index of the first unused entry.  */
  unsigned int id;		/* Identifier for this pool.  */
  symbolS *symbol;		/* Symbol associated with the pool.  */
  segT section;			/* Section the pool belongs to.  */
  subsegT sub_section;		/* Sub-section the pool belongs to.  */
  int size;			/* NOTE(review): presumably the entry size
				   in bytes -- confirm against pool users.  */
  struct literal_pool *next;	/* Next pool in list_of_pools.  */
} literal_pool;

/* Pointer to a linked list of literal pools.  */
static literal_pool *list_of_pools = NULL;
509 \f
/* Pure syntax.	 */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.	 */
const char comment_chars[] = "";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.	*/
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.	*/
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or	 0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix character that indicates the start of an immediate value.  */
#define is_immediate_prefix(C) ((C) == '#')

/* Separator character handling.  */

/* Skip at most one space character.  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
543
544 static inline bfd_boolean
545 skip_past_char (char **str, char c)
546 {
547 if (**str == c)
548 {
549 (*str)++;
550 return TRUE;
551 }
552 else
553 return FALSE;
554 }
555
#define skip_past_comma(str) skip_past_char (str, ',')

/* Arithmetic expressions (possibly involving symbols).  */

/* TRUE while my_get_expression is active; md_operand uses it to flag a
   bad expression instead of treating it as a machine-dependent operand.  */
static bfd_boolean in_my_get_expression_p = FALSE;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0
#define GE_OPT_PREFIX 1
565
/* Return TRUE if the string pointed by *STR is successfully parsed
   as an valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   PREFIX_MODE says whether an immediate '#' prefix is permitted
   (GE_OPT_PREFIX) or not (GE_NO_PREFIX).  If REJECT_ABSENT is non-zero,
   an absent expression (O_absent) is also treated as a failure.  */

static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Parse via the generic expression machinery; while
     in_my_get_expression_p is set, md_operand marks bad expressions
     with O_illegal.  input_line_pointer is saved and restored so the
     caller's scan position is unaffected.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
632
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Delegate to the generic IEEE encoder, honouring target endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
643
644 /* We handle all bad expressions here, so that we can report the faulty
645 instruction in the error message. */
646 void
647 md_operand (expressionS * exp)
648 {
649 if (in_my_get_expression_p)
650 exp->X_op = O_illegal;
651 }
652
/* Immediate values.  */

/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is
   set will be the most meaningful.  Avoid overwriting it with later
   (cascading) errors by calling this function.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
666
/* Similar to first_error, but this function accepts formatted error
   message: FORMAT is a printf-style format string consuming the
   remaining arguments.  Only records the message if no error has been
   recorded yet.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* Verify the message was neither truncated nor erroneous.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
691
/* Register parsing.  */

/* Generic register parser which is called by other specialized
   register parsers.
   CCP points to what should be the beginning of a register name.
   If it is indeed a valid register name, advance CCP over it and
   return the reg_entry structure; otherwise return NULL.
   It does not issue diagnostics.  */

static reg_entry *
parse_reg (char **ccp)
{
  char *start = *ccp;
  char *p;
  reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif

  /* A register name starts with a letter and continues with letters,
     digits or underscores.  */
  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the scanned name up in the register hash table.  */
  reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}
730
731 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
732 return FALSE. */
733 static bfd_boolean
734 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
735 {
736 return (reg_type_masks[type] & (1 << reg->type)) != 0;
737 }
738
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit GPR, WSP or WZR -> W qualifier.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit GPR, SP or XZR -> X qualifier.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* An SVE register is accepted only when REG_TYPE allows it, and it
	 must carry an explicit ".s" or ".d" element-size suffix here.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Step past the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
796
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept any 32/64-bit integer register, including SP/WSP and ZR/WZR.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
808
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE Z/P registers (and suffixes with no leading digit) take no
     element count; width 0 records that.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      /* NOTE(review): WIDTH is unsigned but printed with %d; harmless for
	 these small values but %u would be strictly correct.  */
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* An explicit count must give a 64- or 128-bit total, except for the
     2h and 4b short forms.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
894
895 /* *STR contains an SVE zero/merge predication suffix. Parse it into
896 *PARSED_TYPE and point *STR at the end of the suffix. */
897
898 static bfd_boolean
899 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
900 {
901 char *ptr = *str;
902
903 /* Skip '/'. */
904 gas_assert (*ptr == '/');
905 ptr++;
906 switch (TOLOWER (*ptr))
907 {
908 case 'z':
909 parsed_type->type = NT_zero;
910 break;
911 case 'm':
912 parsed_type->type = NT_merge;
913 break;
914 default:
915 if (*ptr != '\0' && *ptr != ',')
916 first_error_fmt (_("unexpected character `%c' in predication type"),
917 *ptr);
918 else
919 first_error (_("missing predication type"));
920 return FALSE;
921 }
922 parsed_type->width = 0;
923 *str = ptr + 1;
924 return TRUE;
925 }
926
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start with an "untyped, unindexed" description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* From here on, use the register's own, more specific type.  */
  type = reg->type;

  /* A V/Z register may carry a ".<shape>" suffix; a P register may also
     carry a "/z" or "/m" predication suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* The index must be a constant expression.  */
      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1062
1063 /* Parse register.
1064
1065 Return the register number on success; return PARSE_FAIL otherwise.
1066
1067 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1068 the register (e.g. NEON double or quad reg when either has been requested).
1069
1070 If this is a NEON vector register with additional type information, fill
1071 in the struct pointed to by VECTYPE (if non-NULL).
1072
1073 This parser does not handle register list. */
1074
1075 static int
1076 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1077 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1078 {
1079 struct vector_type_el atype;
1080 char *str = *ccp;
1081 int reg = parse_typed_reg (&str, type, rtype, &atype,
1082 /*in_reg_list= */ FALSE);
1083
1084 if (reg == PARSE_FAIL)
1085 return PARSE_FAIL;
1086
1087 if (vectype)
1088 *vectype = atype;
1089
1090 *ccp = str;
1091
1092 return reg;
1093 }
1094
1095 static inline bfd_boolean
1096 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1097 {
1098 return
1099 e1.type == e2.type
1100 && e1.defined == e2.defined
1101 && e1.width == e2.width && e1.index == e2.index;
1102 }
1103
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  /* A register list is always wrapped in braces: { ... }.  */
  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  /* Parse the registers one at a time; a '-' between two entries
     denotes an inclusive range (e.g. { v0.4s - v3.4s }).  */
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      /* If any element carries an index, the whole list takes a single
	 trailing [<index>], parsed after the closing brace below.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* A range must be ascending (or a single register).  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      /* Pack each register of the (possibly one-element) range into the
	 encoded result, 5 bits per register.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  if (expect_index)
    {
      /* Parse the shared element index: '[' constant ']'.  */
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low 2 bits encode (count - 1); registers sit above, 5 bits each.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1264
1265 /* Directives: register aliases. */
1266
1267 static reg_entry *
1268 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1269 {
1270 reg_entry *new;
1271 const char *name;
1272
1273 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1274 {
1275 if (new->builtin)
1276 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1277 str);
1278
1279 /* Only warn about a redefinition if it's not defined as the
1280 same register. */
1281 else if (new->number != number || new->type != type)
1282 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1283
1284 return NULL;
1285 }
1286
1287 name = xstrdup (str);
1288 new = XNEW (reg_entry);
1289
1290 new->name = name;
1291 new->number = number;
1292 new->type = type;
1293 new->builtin = FALSE;
1294
1295 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1296 abort ();
1297
1298 return new;
1299 }
1300
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must name an existing register (or alias).  */
  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Skip the upper-case alias if it is spelled the same as NEWNAME.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise skip the lower-case alias if it matches NEWNAME.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1380
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  It is registered in
   md_pseudo_table only so the directive name is known; reaching here
   means the user wrote .req at the start of a statement.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1388
1389 /* The .unreq directive deletes an alias which was previously defined
1390 by .req. For example:
1391
1392 my_alias .req r11
1393 .unreq my_alias */
1394
1395 static void
1396 s_unreq (int a ATTRIBUTE_UNUSED)
1397 {
1398 char *name;
1399 char saved_char;
1400
1401 name = input_line_pointer;
1402
1403 while (*input_line_pointer != 0
1404 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1405 ++input_line_pointer;
1406
1407 saved_char = *input_line_pointer;
1408 *input_line_pointer = 0;
1409
1410 if (!*name)
1411 as_bad (_("invalid syntax for .unreq directive"));
1412 else
1413 {
1414 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1415
1416 if (!reg)
1417 as_bad (_("unknown register alias '%s'"), name);
1418 else if (reg->builtin)
1419 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1420 name);
1421 else
1422 {
1423 char *p;
1424 char *nbuf;
1425
1426 hash_delete (aarch64_reg_hsh, name, FALSE);
1427 free ((char *) reg->name);
1428 free (reg);
1429
1430 /* Also locate the all upper case and all lower case versions.
1431 Do not complain if we cannot find one or the other as it
1432 was probably deleted above. */
1433
1434 nbuf = strdup (name);
1435 for (p = nbuf; *p; p++)
1436 *p = TOUPPER (*p);
1437 reg = hash_find (aarch64_reg_hsh, nbuf);
1438 if (reg)
1439 {
1440 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1441 free ((char *) reg->name);
1442 free (reg);
1443 }
1444
1445 for (p = nbuf; *p; p++)
1446 *p = TOLOWER (*p);
1447 reg = hash_find (aarch64_reg_hsh, nbuf);
1448 if (reg)
1449 {
1450 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1451 free ((char *) reg->name);
1452 free (reg);
1453 }
1454
1455 free (nbuf);
1456 }
1457 }
1458
1459 *input_line_pointer = saved_char;
1460 demand_empty_rest_of_line ();
1461 }
1462
1463 /* Directives: Instruction set selection. */
1464
1465 #ifdef OBJ_ELF
1466 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1467 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1468 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1469 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1470
/* Create a new mapping symbol ($x or $d, per AAELF64) for the
   transition to STATE, at offset VALUE within FRAG.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Choose the mapping symbol name for STATE.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1526
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.  Emit a $d
   symbol at offset VALUE in FRAG and a STATE symbol BYTES later.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding as data, then resume the original state after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1554
1555 static void mapping_state_2 (enum mstate state, int max_chars);
1556
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1598
/* Same as mapping_state, but MAX_CHARS bytes have already been
   allocated.  Put the mapping symbol that far back.  */

static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only emit mapping symbols into normal (loaded) sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
1618 #else
1619 #define mapping_state(x) /* nothing */
1620 #define mapping_state_2(x, y) /* nothing */
1621 #endif
1622
1623 /* Directives: sectioning and alignment. */
1624
/* Implement the .bss directive: switch to the .bss section and mark
   the region as data.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1634
/* Implement the .even directive: align the output to a 2-byte
   (2**1) boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1646
1647 /* Directives: Literal pools. */
1648
1649 static literal_pool *
1650 find_literal_pool (int size)
1651 {
1652 literal_pool *pool;
1653
1654 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1655 {
1656 if (pool->section == now_seg
1657 && pool->sub_section == now_subseg && pool->size == size)
1658 break;
1659 }
1660
1661 return pool;
1662 }
1663
1664 static literal_pool *
1665 find_or_make_literal_pool (int size)
1666 {
1667 /* Next literal pool ID number. */
1668 static unsigned int latest_pool_num = 1;
1669 literal_pool *pool;
1670
1671 pool = find_literal_pool (size);
1672
1673 if (pool == NULL)
1674 {
1675 /* Create a new pool. */
1676 pool = XNEW (literal_pool);
1677 if (!pool)
1678 return NULL;
1679
1680 /* Currently we always put the literal pool in the current text
1681 section. If we were generating "small" model code where we
1682 knew that all code and initialised data was within 1MB then
1683 we could output literals to mergeable, read-only data
1684 sections. */
1685
1686 pool->next_free_entry = 0;
1687 pool->section = now_seg;
1688 pool->sub_section = now_subseg;
1689 pool->size = size;
1690 pool->next = list_of_pools;
1691 pool->symbol = NULL;
1692
1693 /* Add it to the list. */
1694 list_of_pools = pool;
1695 }
1696
1697 /* New pools, and emptied pools, will have a NULL symbol. */
1698 if (pool->symbol == NULL)
1699 {
1700 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1701 (valueT) 0, &zero_address_frag);
1702 pool->id = latest_pool_num++;
1703 }
1704
1705 /* Done. */
1706 return pool;
1707 }
1708
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   On return, *EXP is rewritten to be a reference to the pool entry
   (an O_symbol expression based on the pool's label symbol).
   Return TRUE on success, otherwise return FALSE.  */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constant literals match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic literals match on the full symbol expression.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP to reference the (new or matching) pool entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1768
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.
   Give SYMBOLP the name NAME (copied), place it in SEGMENT at value
   VALU within FRAG, and append it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its own copy.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1819
1820
/* Implement the .ltorg/.pool directive: emit, at the current location,
   the contents of every pending literal pool (4-, 8- and 16-byte),
   then mark those pools as empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Bind the pool's label symbol to the current location, so that
	 earlier references to the pool resolve here.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1879
1880 #ifdef OBJ_ELF
1881 /* Forward declarations for functions below, in the MD interface
1882 section. */
1883 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1884 static struct reloc_table_entry * find_reloc_table_entry (char **);
1885
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Implement the .word/.long (NBYTES == 4) and .xword/.dword
   (NBYTES == 8) directives for ELF: emit a comma-separated list of
   NBYTES-wide data items.  Relocation suffixes (":suffix:") on symbolic
   operands are recognised but currently rejected as unimplemented.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional '#:suffix:' relocation specifier.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1941
1942 #endif /* OBJ_ELF */
1943
/* Output a 32-bit word, but mark as an instruction.  Implements the
   .inst directive: each comma-separated constant expression is emitted
   as one 4-byte instruction word (byte-swapped on big-endian).  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* AArch64 instructions are always little-endian, so byte-swap
	 the word when assembling for a big-endian target.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1996
/* Implement the .cfi_b_key_frame directive: mark the current CFI FDE
   as using the B pointer-authentication key for return-address
   signing.  */

static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  /* NOTE(review): frch_cfi_data is assumed non-NULL here, i.e. the
     directive is expected inside a .cfi_startproc region -- confirm
     whether use outside one is diagnosed earlier.  */
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2004
2005 #ifdef OBJ_ELF
/* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.  */

static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Grow the current frag so the fix attaches to the instruction that
     must immediately follow this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2020
/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2040
/* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.  */

static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Grow the current frag so the fix attaches to the instruction that
     must immediately follow this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2055 #endif /* OBJ_ELF */
2056
2057 static void s_aarch64_arch (int);
2058 static void s_aarch64_cpu (int);
2059 static void s_aarch64_arch_extension (int);
2060
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The integer argument selects the data item size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};
2091 \f
2092
2093 /* Check whether STR points to a register name followed by a comma or the
2094 end of line; REG_TYPE indicates which register types are checked
2095 against. Return TRUE if STR is such a register name; otherwise return
2096 FALSE. The function does not intend to produce any diagnostics, but since
2097 the register parser aarch64_reg_parse, which is called by this function,
2098 does produce diagnostics, we call clear_error to clear any diagnostics
2099 that may be generated by aarch64_reg_parse.
2100 Also, the function returns FALSE directly if there is any user error
2101 present at the function entry. This prevents the existing diagnostics
2102 state from being spoiled.
2103 The function currently serves parse_constant_immediate and
2104 parse_big_immediate only. */
2105 static bfd_boolean
2106 reg_name_p (char *str, aarch64_reg_type reg_type)
2107 {
2108 int reg;
2109
2110 /* Prevent the diagnostics state from being spoiled. */
2111 if (error_p ())
2112 return FALSE;
2113
2114 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2115
2116 /* Clear the parsing error that may be set by the reg parser. */
2117 clear_error ();
2118
2119 if (reg == PARSE_FAIL)
2120 return FALSE;
2121
2122 skip_whitespace (str);
2123 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2124 return TRUE;
2125
2126 return FALSE;
2127 }
2128
2129 /* Parser functions used exclusively in instruction operands. */
2130
2131 /* Parse an immediate expression which may not be constant.
2132
2133 To prevent the expression parser from pushing a register name
2134 into the symbol table as an undefined symbol, firstly a check is
2135 done to find out whether STR is a register of type REG_TYPE followed
2136 by a comma or the end of line. Return FALSE if STR is such a string. */
2137
2138 static bfd_boolean
2139 parse_immediate_expression (char **str, expressionS *exp,
2140 aarch64_reg_type reg_type)
2141 {
2142 if (reg_name_p (*str, reg_type))
2143 {
2144 set_recoverable_error (_("immediate operand required"));
2145 return FALSE;
2146 }
2147
2148 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2149
2150 if (exp->X_op == O_absent)
2151 {
2152 set_fatal_syntax_error (_("missing immediate expression"));
2153 return FALSE;
2154 }
2155
2156 return TRUE;
2157 }
2158
2159 /* Constant immediate-value read function for use in insn parsing.
2160 STR points to the beginning of the immediate (with the optional
2161 leading #); *VAL receives the value. REG_TYPE says which register
2162 names should be treated as registers rather than as symbolic immediates.
2163
2164 Return TRUE on success; otherwise return FALSE. */
2165
2166 static bfd_boolean
2167 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2168 {
2169 expressionS exp;
2170
2171 if (! parse_immediate_expression (str, &exp, reg_type))
2172 return FALSE;
2173
2174 if (exp.X_op != O_constant)
2175 {
2176 set_syntax_error (_("constant expression required"));
2177 return FALSE;
2178 }
2179
2180 *val = exp.X_add_number;
2181 return TRUE;
2182 }
2183
/* Extract the AArch64 8-bit FP immediate encoding from the IEEE-754
   single-precision word IMM: sign bit to b[7], bits 25:19 to b[6:0].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31] -> b[7].  */
  return sign | low7;
}
2190
2191 /* Return TRUE if the single-precision floating-point value encoded in IMM
2192 can be expressed in the AArch64 8-bit signed floating-point format with
2193 3-bit exponent and normalized 4 bits of precision; in other words, the
2194 floating-point value must be expressable as
2195 (+/-) n / 16 * power (2, r)
2196 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2197
2198 static bfd_boolean
2199 aarch64_imm_float_p (uint32_t imm)
2200 {
2201 /* If a single-precision floating-point value has the following bit
2202 pattern, it can be expressed in the AArch64 8-bit floating-point
2203 format:
2204
2205 3 32222222 2221111111111
2206 1 09876543 21098765432109876543210
2207 n Eeeeeexx xxxx0000000000000000000
2208
2209 where n, e and each x are either 0 or 1 independently, with
2210 E == ~ e. */
2211
2212 uint32_t pattern;
2213
2214 /* Prepare the pattern for 'Eeeeee'. */
2215 if (((imm >> 30) & 0x1) == 0)
2216 pattern = 0x3e000000;
2217 else
2218 pattern = 0x40000000;
2219
2220 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2221 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2222 }
2223
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.  *FPWORD is untouched when FALSE is returned.  */

static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'.  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  /* Repack the surviving bits into single-precision layout.  */
  *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));			/* 3 S bits.  */
  return TRUE;
}
2271
2272 /* Return true if we should treat OPERAND as a double-precision
2273 floating-point operand rather than a single-precision one. */
2274 static bfd_boolean
2275 double_precision_operand_p (const aarch64_opnd_info *operand)
2276 {
2277 /* Check for unsuffixed SVE registers, which are allowed
2278 for LDR and STR but not in instructions that require an
2279 immediate. We get better error messages if we arbitrarily
2280 pick one size, parse the immediate normally, and then
2281 report the match failure in the normal way. */
2282 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2283 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2284 }
2285
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  /* An optional '#' may precede the immediate.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit encoding to 32 bits; reject it if any
	     precision would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision encoding must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A recoverable error: the caller may retry this operand as a
	 register.  */
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Parse a decimal floating-point literal into single-precision
	 littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2361
2362 /* Less-generic immediate-value read function with the possibility of loading
2363 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2364 instructions.
2365
2366 To prevent the expression parser from pushing a register name into the
2367 symbol table as an undefined symbol, a check is firstly done to find
2368 out whether STR is a register of type REG_TYPE followed by a comma or
2369 the end of line. Return FALSE if STR is such a register. */
2370
2371 static bfd_boolean
2372 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2373 {
2374 char *ptr = *str;
2375
2376 if (reg_name_p (ptr, reg_type))
2377 {
2378 set_syntax_error (_("immediate operand required"));
2379 return FALSE;
2380 }
2381
2382 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2383
2384 if (inst.reloc.exp.X_op == O_constant)
2385 *imm = inst.reloc.exp.X_add_number;
2386
2387 *str = ptr;
2388
2389 return TRUE;
2390 }
2391
2392 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2393 if NEED_LIBOPCODES is non-zero, the fixup will need
2394 assistance from the libopcodes. */
2395
2396 static inline void
2397 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2398 const aarch64_opnd_info *operand,
2399 int need_libopcodes_p)
2400 {
2401 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2402 reloc->opnd = operand->type;
2403 if (need_libopcodes_p)
2404 reloc->need_libopcodes_p = 1;
2405 };
2406
2407 /* Return TRUE if the instruction needs to be fixed up later internally by
2408 the GAS; otherwise return FALSE. */
2409
2410 static inline bfd_boolean
2411 aarch64_gas_internal_fixup_p (void)
2412 {
2413 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2414 }
2415
2416 /* Assign the immediate value to the relevant field in *OPERAND if
2417 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2418 needs an internal fixup in a later stage.
2419 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2420 IMM.VALUE that may get assigned with the constant. */
2421 static inline void
2422 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2423 aarch64_opnd_info *operand,
2424 int addr_off_p,
2425 int need_libopcodes_p,
2426 int skip_p)
2427 {
2428 if (reloc->exp.X_op == O_constant)
2429 {
2430 if (addr_off_p)
2431 operand->addr.offset.imm = reloc->exp.X_add_number;
2432 else
2433 operand->imm.value = reloc->exp.X_add_number;
2434 reloc->type = BFD_RELOC_UNUSED;
2435 }
2436 else
2437 {
2438 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2439 /* Tell libopcodes to ignore this operand or not. This is helpful
2440 when one of the operands needs to be fixed up later but we need
2441 libopcodes to check the other operands. */
2442 operand->skip = skip_p;
2443 }
2444 }
2445
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;		/* Modifier text, without the colons.  */
  int pc_rel;			/* Non-zero if the relocation is pc-relative.  */
  /* Each of the following gives the BFD relocation to emit when the
     modifier appears on the corresponding instruction class; 0 means
     the modifier is not allowed for that class (callers check for 0
     and report an error).  */
  bfd_reloc_code_real_type adr_type;		/* ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* ADRP.  */
  bfd_reloc_code_real_type movw_type;		/* MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;		/* Load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* LDR (literal).  */
};
2465
2466 static struct reloc_table_entry reloc_table[] = {
2467 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2468 {"lo12", 0,
2469 0, /* adr_type */
2470 0,
2471 0,
2472 BFD_RELOC_AARCH64_ADD_LO12,
2473 BFD_RELOC_AARCH64_LDST_LO12,
2474 0},
2475
2476 /* Higher 21 bits of pc-relative page offset: ADRP */
2477 {"pg_hi21", 1,
2478 0, /* adr_type */
2479 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2480 0,
2481 0,
2482 0,
2483 0},
2484
2485 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2486 {"pg_hi21_nc", 1,
2487 0, /* adr_type */
2488 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2489 0,
2490 0,
2491 0,
2492 0},
2493
2494 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2495 {"abs_g0", 0,
2496 0, /* adr_type */
2497 0,
2498 BFD_RELOC_AARCH64_MOVW_G0,
2499 0,
2500 0,
2501 0},
2502
2503 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2504 {"abs_g0_s", 0,
2505 0, /* adr_type */
2506 0,
2507 BFD_RELOC_AARCH64_MOVW_G0_S,
2508 0,
2509 0,
2510 0},
2511
2512 /* Less significant bits 0-15 of address/value: MOVK, no check */
2513 {"abs_g0_nc", 0,
2514 0, /* adr_type */
2515 0,
2516 BFD_RELOC_AARCH64_MOVW_G0_NC,
2517 0,
2518 0,
2519 0},
2520
2521 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2522 {"abs_g1", 0,
2523 0, /* adr_type */
2524 0,
2525 BFD_RELOC_AARCH64_MOVW_G1,
2526 0,
2527 0,
2528 0},
2529
2530 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2531 {"abs_g1_s", 0,
2532 0, /* adr_type */
2533 0,
2534 BFD_RELOC_AARCH64_MOVW_G1_S,
2535 0,
2536 0,
2537 0},
2538
2539 /* Less significant bits 16-31 of address/value: MOVK, no check */
2540 {"abs_g1_nc", 0,
2541 0, /* adr_type */
2542 0,
2543 BFD_RELOC_AARCH64_MOVW_G1_NC,
2544 0,
2545 0,
2546 0},
2547
2548 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2549 {"abs_g2", 0,
2550 0, /* adr_type */
2551 0,
2552 BFD_RELOC_AARCH64_MOVW_G2,
2553 0,
2554 0,
2555 0},
2556
2557 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2558 {"abs_g2_s", 0,
2559 0, /* adr_type */
2560 0,
2561 BFD_RELOC_AARCH64_MOVW_G2_S,
2562 0,
2563 0,
2564 0},
2565
2566 /* Less significant bits 32-47 of address/value: MOVK, no check */
2567 {"abs_g2_nc", 0,
2568 0, /* adr_type */
2569 0,
2570 BFD_RELOC_AARCH64_MOVW_G2_NC,
2571 0,
2572 0,
2573 0},
2574
2575 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2576 {"abs_g3", 0,
2577 0, /* adr_type */
2578 0,
2579 BFD_RELOC_AARCH64_MOVW_G3,
2580 0,
2581 0,
2582 0},
2583
2584 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2585 {"prel_g0", 1,
2586 0, /* adr_type */
2587 0,
2588 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2589 0,
2590 0,
2591 0},
2592
2593 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2594 {"prel_g0_nc", 1,
2595 0, /* adr_type */
2596 0,
2597 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2598 0,
2599 0,
2600 0},
2601
2602 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2603 {"prel_g1", 1,
2604 0, /* adr_type */
2605 0,
2606 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2607 0,
2608 0,
2609 0},
2610
2611 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2612 {"prel_g1_nc", 1,
2613 0, /* adr_type */
2614 0,
2615 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2616 0,
2617 0,
2618 0},
2619
2620 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2621 {"prel_g2", 1,
2622 0, /* adr_type */
2623 0,
2624 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2625 0,
2626 0,
2627 0},
2628
2629 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2630 {"prel_g2_nc", 1,
2631 0, /* adr_type */
2632 0,
2633 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2634 0,
2635 0,
2636 0},
2637
2638 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2639 {"prel_g3", 1,
2640 0, /* adr_type */
2641 0,
2642 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2643 0,
2644 0,
2645 0},
2646
2647 /* Get to the page containing GOT entry for a symbol. */
2648 {"got", 1,
2649 0, /* adr_type */
2650 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2651 0,
2652 0,
2653 0,
2654 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2655
2656 /* 12 bit offset into the page containing GOT entry for that symbol. */
2657 {"got_lo12", 0,
2658 0, /* adr_type */
2659 0,
2660 0,
2661 0,
2662 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2663 0},
2664
2665 /* 0-15 bits of address/value: MOVk, no check. */
2666 {"gotoff_g0_nc", 0,
2667 0, /* adr_type */
2668 0,
2669 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2670 0,
2671 0,
2672 0},
2673
2674 /* Most significant bits 16-31 of address/value: MOVZ. */
2675 {"gotoff_g1", 0,
2676 0, /* adr_type */
2677 0,
2678 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2679 0,
2680 0,
2681 0},
2682
2683 /* 15 bit offset into the page containing GOT entry for that symbol. */
2684 {"gotoff_lo15", 0,
2685 0, /* adr_type */
2686 0,
2687 0,
2688 0,
2689 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2690 0},
2691
2692 /* Get to the page containing GOT TLS entry for a symbol */
2693 {"gottprel_g0_nc", 0,
2694 0, /* adr_type */
2695 0,
2696 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2697 0,
2698 0,
2699 0},
2700
2701 /* Get to the page containing GOT TLS entry for a symbol */
2702 {"gottprel_g1", 0,
2703 0, /* adr_type */
2704 0,
2705 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2706 0,
2707 0,
2708 0},
2709
2710 /* Get to the page containing GOT TLS entry for a symbol */
2711 {"tlsgd", 0,
2712 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2713 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2714 0,
2715 0,
2716 0,
2717 0},
2718
2719 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2720 {"tlsgd_lo12", 0,
2721 0, /* adr_type */
2722 0,
2723 0,
2724 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2725 0,
2726 0},
2727
2728 /* Lower 16 bits address/value: MOVk. */
2729 {"tlsgd_g0_nc", 0,
2730 0, /* adr_type */
2731 0,
2732 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2733 0,
2734 0,
2735 0},
2736
2737 /* Most significant bits 16-31 of address/value: MOVZ. */
2738 {"tlsgd_g1", 0,
2739 0, /* adr_type */
2740 0,
2741 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2742 0,
2743 0,
2744 0},
2745
2746 /* Get to the page containing GOT TLS entry for a symbol */
2747 {"tlsdesc", 0,
2748 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2749 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2750 0,
2751 0,
2752 0,
2753 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2754
2755 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2756 {"tlsdesc_lo12", 0,
2757 0, /* adr_type */
2758 0,
2759 0,
2760 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2761 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2762 0},
2763
2764 /* Get to the page containing GOT TLS entry for a symbol.
2765 The same as GD, we allocate two consecutive GOT slots
2766 for module index and module offset, the only difference
2767 with GD is the module offset should be initialized to
2768 zero without any outstanding runtime relocation. */
2769 {"tlsldm", 0,
2770 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2771 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2772 0,
2773 0,
2774 0,
2775 0},
2776
2777 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2778 {"tlsldm_lo12_nc", 0,
2779 0, /* adr_type */
2780 0,
2781 0,
2782 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2783 0,
2784 0},
2785
2786 /* 12 bit offset into the module TLS base address. */
2787 {"dtprel_lo12", 0,
2788 0, /* adr_type */
2789 0,
2790 0,
2791 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2792 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2793 0},
2794
2795 /* Same as dtprel_lo12, no overflow check. */
2796 {"dtprel_lo12_nc", 0,
2797 0, /* adr_type */
2798 0,
2799 0,
2800 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2801 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2802 0},
2803
2804 /* bits[23:12] of offset to the module TLS base address. */
2805 {"dtprel_hi12", 0,
2806 0, /* adr_type */
2807 0,
2808 0,
2809 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2810 0,
2811 0},
2812
2813 /* bits[15:0] of offset to the module TLS base address. */
2814 {"dtprel_g0", 0,
2815 0, /* adr_type */
2816 0,
2817 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2818 0,
2819 0,
2820 0},
2821
2822 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2823 {"dtprel_g0_nc", 0,
2824 0, /* adr_type */
2825 0,
2826 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2827 0,
2828 0,
2829 0},
2830
2831 /* bits[31:16] of offset to the module TLS base address. */
2832 {"dtprel_g1", 0,
2833 0, /* adr_type */
2834 0,
2835 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2836 0,
2837 0,
2838 0},
2839
2840 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2841 {"dtprel_g1_nc", 0,
2842 0, /* adr_type */
2843 0,
2844 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2845 0,
2846 0,
2847 0},
2848
2849 /* bits[47:32] of offset to the module TLS base address. */
2850 {"dtprel_g2", 0,
2851 0, /* adr_type */
2852 0,
2853 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2854 0,
2855 0,
2856 0},
2857
2858 /* Lower 16 bit offset into GOT entry for a symbol */
2859 {"tlsdesc_off_g0_nc", 0,
2860 0, /* adr_type */
2861 0,
2862 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2863 0,
2864 0,
2865 0},
2866
2867 /* Higher 16 bit offset into GOT entry for a symbol */
2868 {"tlsdesc_off_g1", 0,
2869 0, /* adr_type */
2870 0,
2871 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2872 0,
2873 0,
2874 0},
2875
2876 /* Get to the page containing GOT TLS entry for a symbol */
2877 {"gottprel", 0,
2878 0, /* adr_type */
2879 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2880 0,
2881 0,
2882 0,
2883 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2884
2885 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2886 {"gottprel_lo12", 0,
2887 0, /* adr_type */
2888 0,
2889 0,
2890 0,
2891 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2892 0},
2893
2894 /* Get tp offset for a symbol. */
2895 {"tprel", 0,
2896 0, /* adr_type */
2897 0,
2898 0,
2899 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2900 0,
2901 0},
2902
2903 /* Get tp offset for a symbol. */
2904 {"tprel_lo12", 0,
2905 0, /* adr_type */
2906 0,
2907 0,
2908 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2909 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2910 0},
2911
2912 /* Get tp offset for a symbol. */
2913 {"tprel_hi12", 0,
2914 0, /* adr_type */
2915 0,
2916 0,
2917 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2918 0,
2919 0},
2920
2921 /* Get tp offset for a symbol. */
2922 {"tprel_lo12_nc", 0,
2923 0, /* adr_type */
2924 0,
2925 0,
2926 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2927 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2928 0},
2929
2930 /* Most significant bits 32-47 of address/value: MOVZ. */
2931 {"tprel_g2", 0,
2932 0, /* adr_type */
2933 0,
2934 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2935 0,
2936 0,
2937 0},
2938
2939 /* Most significant bits 16-31 of address/value: MOVZ. */
2940 {"tprel_g1", 0,
2941 0, /* adr_type */
2942 0,
2943 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2944 0,
2945 0,
2946 0},
2947
2948 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2949 {"tprel_g1_nc", 0,
2950 0, /* adr_type */
2951 0,
2952 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2953 0,
2954 0,
2955 0},
2956
2957 /* Most significant bits 0-15 of address/value: MOVZ. */
2958 {"tprel_g0", 0,
2959 0, /* adr_type */
2960 0,
2961 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2962 0,
2963 0,
2964 0},
2965
2966 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2967 {"tprel_g0_nc", 0,
2968 0, /* adr_type */
2969 0,
2970 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2971 0,
2972 0,
2973 0},
2974
2975 /* 15bit offset from got entry to base address of GOT table. */
2976 {"gotpage_lo15", 0,
2977 0,
2978 0,
2979 0,
2980 0,
2981 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2982 0},
2983
2984 /* 14bit offset from got entry to base address of GOT table. */
2985 {"gotpage_lo14", 0,
2986 0,
2987 0,
2988 0,
2989 0,
2990 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2991 0},
2992 };
2993
2994 /* Given the address of a pointer pointing to the textual name of a
2995 relocation as may appear in assembler source, attempt to find its
2996 details in reloc_table. The pointer will be updated to the character
2997 after the trailing colon. On failure, NULL will be returned;
2998 otherwise return the reloc_table_entry. */
2999
3000 static struct reloc_table_entry *
3001 find_reloc_table_entry (char **str)
3002 {
3003 unsigned int i;
3004 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3005 {
3006 int length = strlen (reloc_table[i].name);
3007
3008 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3009 && (*str)[length] == ':')
3010 {
3011 *str += (length + 1);
3012 return &reloc_table[i];
3013 }
3014 }
3015
3016 return NULL;
3017 }
3018
/* Mode argument to parse_shift and parse_shifter_operand: which shift
   or extension operators are acceptable in the current context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3033
/* Parse a <shift> operator on an AArch64 data processing instruction.
   On success, fill in OPERAND->shifter (kind, amount, presence flags)
   and advance *STR past the parsed text.
   Return TRUE on success; otherwise return FALSE.  */
static bfd_boolean
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan past the alphabetic shift-operator mnemonic.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return FALSE;
    }

  /* Look the mnemonic up in the shift-operator hash table.  */
  shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return FALSE;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only valid where MODE explicitly allows it.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return FALSE;
    }

  /* Likewise MUL (which also starts "MUL VL").  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return FALSE;
    }

  /* Check the parsed operator against the set MODE permits.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return FALSE;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return FALSE;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* Only extending shifts may omit the amount, and only when no
	 immediate prefix was seen.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return FALSE;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return FALSE;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return FALSE;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return TRUE;
}
3206
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the two immediate-accepting modes are handled here.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return FALSE;

  p = *str;

  /* Accept an immediate expression.  */
  if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
    return FALSE;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return FALSE;

  /* Do not accept any shifter for logical immediate values.
     NOTE(review): if parse_shift fails here it records its own syntax
     error yet this function still returns TRUE — presumably callers
     rely on that recorded error; confirm before changing.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return FALSE;
    }

  *str = p;
  return TRUE;
}
3247
/* Parse a <shifter_operand> for a data processing instruction:

      <Rm>
      <Rm>, <shift>
      #<immediate>
      #<immediate>, LSL #imm

   where <shift> is handled by parse_shift above, and the last two
   cases are handled by the function above.

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_shifter_operand (char **str, aarch64_opnd_info *operand,
		       enum parse_shift_mode mode)
{
  const reg_entry *reg;
  aarch64_opnd_qualifier_t qualifier;
  enum aarch64_operand_class opd_class
    = aarch64_get_operand_class (operand->type);

  /* First try the register form.  */
  reg = aarch64_reg_parse_32_64 (str, &qualifier);
  if (reg)
    {
      /* A register is not acceptable where an immediate is required.  */
      if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
	{
	  set_syntax_error (_("unexpected register in the immediate operand"));
	  return FALSE;
	}

      /* Only an integer register (or ZR) may be shifted/extended here.  */
      if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
	{
	  set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
	  return FALSE;
	}

      operand->reg.regno = reg->number;
      operand->qualifier = qualifier;

      /* Accept optional shift operation on register.  */
      if (! skip_past_comma (str))
	return TRUE;

      if (! parse_shift (str, operand, mode))
	return FALSE;

      return TRUE;
    }
  else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
    {
      set_syntax_error
	(_("integer register expected in the extended/shifted operand "
	   "register"));
      return FALSE;
    }

  /* We have a shifted immediate variable.  */
  return parse_shifter_operand_imm (str, operand, mode);
}
3309
/* Parse a <shifter_operand> that may carry a leading relocation
   modifier (#:rello:symbol or :rello:symbol); anything else is punted
   to parse_shifter_operand.  On a modifier, record the relocation in
   inst.reloc.  Return TRUE on success; return FALSE otherwise.  */

static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step over "#:" or ":".  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      /* An add_type of 0 in the table means the modifier is not
	 permitted on ADD-immediate style operands.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3370
3371 /* Parse all forms of an address expression. Information is written
3372 to *OPERAND and/or inst.reloc.
3373
3374 The A64 instruction set has the following addressing modes:
3375
3376 Offset
3377 [base] // in SIMD ld/st structure
3378 [base{,#0}] // in ld/st exclusive
3379 [base{,#imm}]
3380 [base,Xm{,LSL #imm}]
3381 [base,Xm,SXTX {#imm}]
3382 [base,Wm,(S|U)XTW {#imm}]
3383 Pre-indexed
3384 [base,#imm]!
3385 Post-indexed
3386 [base],#imm
3387 [base],Xm // in SIMD ld/st structure
3388 PC-relative (literal)
3389 label
3390 SVE:
3391 [base,#imm,MUL VL]
3392 [base,Zm.D{,LSL #imm}]
3393 [base,Zm.S,(S|U)XTW {#imm}]
3394 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3395 [Zn.S,#imm]
3396 [Zn.D,#imm]
3397 [Zn.S{, Xm}]
3398 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3399 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3400 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3401
3402 (As a convenience, the notation "=immediate" is permitted in conjunction
3403 with the pc-relative literal load instructions to automatically place an
3404 immediate value or symbolic address in a nearby literal pool and generate
3405 a hidden label which references it.)
3406
3407 Upon a successful parsing, the address structure in *OPERAND will be
3408 filled in the following way:
3409
3410 .base_regno = <base>
3411 .offset.is_reg // 1 if the offset is a register
3412 .offset.imm = <imm>
3413 .offset.regno = <Rm>
3414
3415 For different addressing modes defined in the A64 ISA:
3416
3417 Offset
3418 .pcrel=0; .preind=1; .postind=0; .writeback=0
3419 Pre-indexed
3420 .pcrel=0; .preind=1; .postind=0; .writeback=1
3421 Post-indexed
3422 .pcrel=0; .preind=0; .postind=1; .writeback=1
3423 PC-relative (literal)
3424 .pcrel=1; .preind=1; .postind=0; .writeback=0
3425
3426 The shift/extension information, if any, will be stored in .shifter.
3427 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3428 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3429 corresponding register.
3430
3431 BASE_TYPE says which types of base register should be accepted and
3432 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3433 is the type of shifter that is allowed for immediate offsets,
3434 or SHIFTED_NONE if none.
3435
3436 In all other respects, it is the caller's responsibility to check
3437 for addressing modes not supported by the instruction, and to set
3438 inst.reloc.type. */
3439
static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  /* Relocation information is accumulated in the global instruction
     record INST.  */
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is the "=immediate" or label (PC-relative) form.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  /* ADR uses a different relocation from the PC-relative loads.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  /* Parse the base register; BASE_TYPE restricts which register classes
     are acceptable here.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return FALSE;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shift kinds require a 64-bit offset register...  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	      /* ...whose element size matches the base's, except for the
		 SVE2 vector-plus-scalar [Zn.S, Xm] form.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return FALSE;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return FALSE;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return FALSE;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      /* Post-index operand: a 64-bit register or an expression.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return FALSE;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; reject [Rn]! accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  /* Reject [Rn]! */
	  set_syntax_error (_("missing offset in the pre-indexed address"));
	  return FALSE;
	}

      operand->addr.preind = 1;
      if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	{
	  operand->addr.offset.is_reg = 1;
	  operand->addr.offset.regno = REG_ZR;
	  *offset_qualifier = AARCH64_OPND_QLF_X;
	}
      else
	{
	  /* Treat the absent offset as an immediate zero.  */
	  inst.reloc.exp.X_op = O_constant;
	  inst.reloc.exp.X_add_number = 0;
	}
    }

  *str = p;
  return TRUE;
}
3727
3728 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3729 on success. */
3730 static bfd_boolean
3731 parse_address (char **str, aarch64_opnd_info *operand)
3732 {
3733 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3734 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3735 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3736 }
3737
/* Parse an address in which SVE vector registers and MUL VL are allowed.
   The arguments have the same meaning as for parse_address_main.
   Return TRUE on success.  */
static bfd_boolean
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  /* Same as parse_address but with SVE base/offset register classes and
     a MUL VL immediate shift allowed.  */
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
3750
3751 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3752 Return TRUE on success; otherwise return FALSE. */
3753 static bfd_boolean
3754 parse_half (char **str, int *internal_fixup_p)
3755 {
3756 char *p = *str;
3757
3758 skip_past_char (&p, '#');
3759
3760 gas_assert (internal_fixup_p);
3761 *internal_fixup_p = 0;
3762
3763 if (*p == ':')
3764 {
3765 struct reloc_table_entry *entry;
3766
3767 /* Try to parse a relocation. Anything else is an error. */
3768 ++p;
3769 if (!(entry = find_reloc_table_entry (&p)))
3770 {
3771 set_syntax_error (_("unknown relocation modifier"));
3772 return FALSE;
3773 }
3774
3775 if (entry->movw_type == 0)
3776 {
3777 set_syntax_error
3778 (_("this relocation modifier is not allowed on this instruction"));
3779 return FALSE;
3780 }
3781
3782 inst.reloc.type = entry->movw_type;
3783 }
3784 else
3785 *internal_fixup_p = 1;
3786
3787 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3788 return FALSE;
3789
3790 *str = p;
3791 return TRUE;
3792 }
3793
3794 /* Parse an operand for an ADRP instruction:
3795 ADRP <Xd>, <label>
3796 Return TRUE on success; otherwise return FALSE. */
3797
3798 static bfd_boolean
3799 parse_adrp (char **str)
3800 {
3801 char *p;
3802
3803 p = *str;
3804 if (*p == ':')
3805 {
3806 struct reloc_table_entry *entry;
3807
3808 /* Try to parse a relocation. Anything else is an error. */
3809 ++p;
3810 if (!(entry = find_reloc_table_entry (&p)))
3811 {
3812 set_syntax_error (_("unknown relocation modifier"));
3813 return FALSE;
3814 }
3815
3816 if (entry->adrp_type == 0)
3817 {
3818 set_syntax_error
3819 (_("this relocation modifier is not allowed on this instruction"));
3820 return FALSE;
3821 }
3822
3823 inst.reloc.type = entry->adrp_type;
3824 }
3825 else
3826 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3827
3828 inst.reloc.pc_rel = 1;
3829
3830 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3831 return FALSE;
3832
3833 *str = p;
3834 return TRUE;
3835 }
3836
3837 /* Miscellaneous. */
3838
3839 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3840 of SIZE tokens in which index I gives the token for field value I,
3841 or is null if field value I is invalid. REG_TYPE says which register
3842 names should be treated as registers rather than as symbolic immediates.
3843
3844 Return true on success, moving *STR past the operand and storing the
3845 field value in *VAL. */
3846
3847 static int
3848 parse_enum_string (char **str, int64_t *val, const char *const *array,
3849 size_t size, aarch64_reg_type reg_type)
3850 {
3851 expressionS exp;
3852 char *p, *q;
3853 size_t i;
3854
3855 /* Match C-like tokens. */
3856 p = q = *str;
3857 while (ISALNUM (*q))
3858 q++;
3859
3860 for (i = 0; i < size; ++i)
3861 if (array[i]
3862 && strncasecmp (array[i], p, q - p) == 0
3863 && array[i][q - p] == 0)
3864 {
3865 *val = i;
3866 *str = q;
3867 return TRUE;
3868 }
3869
3870 if (!parse_immediate_expression (&p, &exp, reg_type))
3871 return FALSE;
3872
3873 if (exp.X_op == O_constant
3874 && (uint64_t) exp.X_add_number < size)
3875 {
3876 *val = exp.X_add_number;
3877 *str = p;
3878 return TRUE;
3879 }
3880
3881 /* Use the default error for this operand. */
3882 return FALSE;
3883 }
3884
3885 /* Parse an option for a preload instruction. Returns the encoding for the
3886 option, or PARSE_FAIL. */
3887
3888 static int
3889 parse_pldop (char **str)
3890 {
3891 char *p, *q;
3892 const struct aarch64_name_value_pair *o;
3893
3894 p = q = *str;
3895 while (ISALNUM (*q))
3896 q++;
3897
3898 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3899 if (!o)
3900 return PARSE_FAIL;
3901
3902 *str = q;
3903 return o->value;
3904 }
3905
3906 /* Parse an option for a barrier instruction. Returns the encoding for the
3907 option, or PARSE_FAIL. */
3908
3909 static int
3910 parse_barrier (char **str)
3911 {
3912 char *p, *q;
3913 const asm_barrier_opt *o;
3914
3915 p = q = *str;
3916 while (ISALPHA (*q))
3917 q++;
3918
3919 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3920 if (!o)
3921 return PARSE_FAIL;
3922
3923 *str = q;
3924 return o->value;
3925 }
3926
3927 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3928 return 0 if successful. Otherwise return PARSE_FAIL. */
3929
3930 static int
3931 parse_barrier_psb (char **str,
3932 const struct aarch64_name_value_pair ** hint_opt)
3933 {
3934 char *p, *q;
3935 const struct aarch64_name_value_pair *o;
3936
3937 p = q = *str;
3938 while (ISALPHA (*q))
3939 q++;
3940
3941 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3942 if (!o)
3943 {
3944 set_fatal_syntax_error
3945 ( _("unknown or missing option to PSB"));
3946 return PARSE_FAIL;
3947 }
3948
3949 if (o->value != 0x11)
3950 {
3951 /* PSB only accepts option name 'CSYNC'. */
3952 set_syntax_error
3953 (_("the specified option is not accepted for PSB"));
3954 return PARSE_FAIL;
3955 }
3956
3957 *str = q;
3958 *hint_opt = o;
3959 return 0;
3960 }
3961
3962 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
3963 return 0 if successful. Otherwise return PARSE_FAIL. */
3964
3965 static int
3966 parse_bti_operand (char **str,
3967 const struct aarch64_name_value_pair ** hint_opt)
3968 {
3969 char *p, *q;
3970 const struct aarch64_name_value_pair *o;
3971
3972 p = q = *str;
3973 while (ISALPHA (*q))
3974 q++;
3975
3976 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3977 if (!o)
3978 {
3979 set_fatal_syntax_error
3980 ( _("unknown option to BTI"));
3981 return PARSE_FAIL;
3982 }
3983
3984 switch (o->value)
3985 {
3986 /* Valid BTI operands. */
3987 case HINT_OPD_C:
3988 case HINT_OPD_J:
3989 case HINT_OPD_JC:
3990 break;
3991
3992 default:
3993 set_syntax_error
3994 (_("unknown option to BTI"));
3995 return PARSE_FAIL;
3996 }
3997
3998 *str = q;
3999 *hint_opt = o;
4000 return 0;
4001 }
4002
4003 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4004 Returns the encoding for the option, or PARSE_FAIL.
4005
4006 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4007 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4008
4009 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4010 field, otherwise as a system register.
4011 */
4012
static int
parse_sys_reg (char **str, struct hash_control *sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF; the copy stops
     writing (but keeps scanning) after 31 characters.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';
  /* Assert that BUF be large enough.  */
  gas_assert (p - buf == q - *str);

  o = hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Range-check each field of the implementation-defined name.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the encoded register number.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: diagnose names unsupported by the selected processor
	 and deprecated names, but still return the encoding.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4070
4071 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4072 for the option, or NULL. */
4073
4074 static const aarch64_sys_ins_reg *
4075 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
4076 {
4077 char *p, *q;
4078 char buf[32];
4079 const aarch64_sys_ins_reg *o;
4080
4081 p = buf;
4082 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4083 if (p < buf + 31)
4084 *p++ = TOLOWER (*q);
4085 *p = '\0';
4086
4087 o = hash_find (sys_ins_regs, buf);
4088 if (!o)
4089 return NULL;
4090
4091 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
4092 as_bad (_("selected processor does not support system register "
4093 "name '%s'"), buf);
4094
4095 *str = q;
4096 return o;
4097 }
4098 \f
/* Helper macros for the operand parser; each jumps to the enclosing
   function's `failure' label when parsing fails.  */

/* Consume character CHR from the input, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into VAL, or fail with the default
   diagnostic.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE into INFO's
   register number and qualifier, or fail.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and check it lies in [MIN, MAX],
   or fail.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic operand drawn from ARRAY into VAL, or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; fail if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4150 \f
/* Encode the 12-bit immediate of an add/sub (immediate) instruction;
   the value occupies bits [21:10].  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  return imm << 10;
}
4157
/* Encode the shift-amount field of an add/sub (immediate) instruction;
   the count is placed at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  return cnt << 22;
}
4164
4165
/* Encode the immediate of an ADR instruction: imm[1:0] goes to
   bits [30:29] (immlo) and imm[20:2] to bits [23:5] (immhi).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;		  /* imm[1:0]  */
  uint32_t immhi = (imm >> 2) & 0x7ffff;  /* imm[20:2] */

  return (immlo << 29) | (immhi << 5);
}
4173
/* Encode the 16-bit immediate of a move-wide instruction; the value
   occupies bits [20:5].  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  return imm << 5;
}
4180
/* Encode the 26-bit offset of an unconditional branch; the offset
   occupies bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x03ffffff;
}
4187
/* Encode the 19-bit offset of a conditional branch or compare-and-branch
   instruction; the offset occupies bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4194
/* Encode the 19-bit offset of a load-literal instruction; the offset
   occupies bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4201
/* Encode the 14-bit offset of a test-and-branch instruction; the offset
   occupies bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fff) << 5;
}
4208
/* Encode the 16-bit immediate of SVC/HVC/SMC; the value occupies
   bits [20:5].  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  return imm << 5;
}
4215
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling the
   op bit (bit 30) of the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ 0x40000000;
}
4222
/* Turn a MOVZ/MOVN-family opcode into MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | 0x40000000;
}
4228
/* Turn a MOVZ/MOVN-family opcode into MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(uint32_t) 0x40000000;
}
4234
4235 /* Overall per-instruction processing. */
4236
4237 /* We need to be able to fix up arbitrary expressions in some statements.
4238 This is so that we can handle symbols that are an arbitrary distance from
4239 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4240 which returns part of an address in a form which will be valid for
4241 a data instruction. We do this by pushing the expression into a symbol
4242 in the expr_section, and creating a fix for that. */
4243
4244 static fixS *
4245 fix_new_aarch64 (fragS * frag,
4246 int where,
4247 short int size, expressionS * exp, int pc_rel, int reloc)
4248 {
4249 fixS *new_fix;
4250
4251 switch (exp->X_op)
4252 {
4253 case O_constant:
4254 case O_symbol:
4255 case O_add:
4256 case O_subtract:
4257 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4258 break;
4259
4260 default:
4261 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4262 pc_rel, reloc);
4263 break;
4264 }
4265 return new_fix;
4266 }
4267 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Names parallel the
   enum aarch64_operand_error_kind values, in ascending severity order.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4289
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The asserts document (and verify) that the enum is declared in
     ascending severity order, which the comparison below relies on.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4310
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4339
4340 static void
4341 reset_aarch64_instruction (aarch64_instruction *instruction)
4342 {
4343 memset (instruction, '\0', sizeof (aarch64_instruction));
4344 instruction->reloc.type = BFD_RELOC_UNUSED;
4345 }
4346
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error; one node per opcode (instruction template)
   tried on the current assembly line.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Template the error was found on.  */
  aarch64_operand_error detail;		/* Kind, operand index, message.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Head and tail of the singly-linked list of error records.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
4366
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines to avoid
   re-allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4378
4379 /* Initialize the data structure that stores the operand mismatch
4380 information on assembling one line of the assembly code. */
4381 static void
4382 init_operand_error_report (void)
4383 {
4384 if (operand_error_report.head != NULL)
4385 {
4386 gas_assert (operand_error_report.tail != NULL);
4387 operand_error_report.tail->next = free_opnd_error_record_nodes;
4388 free_opnd_error_record_nodes = operand_error_report.head;
4389 operand_error_report.head = NULL;
4390 operand_error_report.tail = NULL;
4391 return;
4392 }
4393 gas_assert (operand_error_report.tail == NULL);
4394 }
4395
4396 /* Return TRUE if some operand error has been recorded during the
4397 parsing of the current assembly line using the opcode *OPCODE;
4398 otherwise return FALSE. */
4399 static inline bfd_boolean
4400 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4401 {
4402 operand_error_record *record = operand_error_report.head;
4403 return record && record->opcode == opcode;
4404 }
4405
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* If a record already exists for OPCODE it is the list head, since
     records are always inserted at the head.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Recycle a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Fill in (or overwrite) the detail of the record for this opcode.  */
  record->detail = new_record->detail;
}
4457
4458 static inline void
4459 record_operand_error_info (const aarch64_opcode *opcode,
4460 aarch64_operand_error *error_info)
4461 {
4462 operand_error_record record;
4463 record.opcode = opcode;
4464 record.detail = *error_info;
4465 add_operand_error_record (&record);
4466 }
4467
/* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
   error message *ERROR, for operand IDX (count from 0).  */

static void
record_operand_error (const aarch64_opcode *opcode, int idx,
		      enum aarch64_operand_error_kind kind,
		      const char* error)
{
  aarch64_operand_error info;
  /* Zero-initialize so that all fields (including the data array,
     which this function does not use) have well-defined values.  */
  memset(&info, 0, sizeof (info));
  info.index = idx;
  info.kind = kind;
  info.error = error;
  info.non_fatal = FALSE;
  record_operand_error_info (opcode, &info);
}
4484
4485 static void
4486 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4487 enum aarch64_operand_error_kind kind,
4488 const char* error, const int *extra_data)
4489 {
4490 aarch64_operand_error info;
4491 info.index = idx;
4492 info.kind = kind;
4493 info.error = error;
4494 info.data[0] = extra_data[0];
4495 info.data[1] = extra_data[1];
4496 info.data[2] = extra_data[2];
4497 info.non_fatal = FALSE;
4498 record_operand_error_info (opcode, &info);
4499 }
4500
4501 static void
4502 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4503 const char* error, int lower_bound,
4504 int upper_bound)
4505 {
4506 int data[3] = {lower_bound, upper_bound, 0};
4507 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4508 error, data);
4509 }
4510
4511 /* Remove the operand error record for *OPCODE. */
4512 static void ATTRIBUTE_UNUSED
4513 remove_operand_error_record (const aarch64_opcode *opcode)
4514 {
4515 if (opcode_has_operand_error_p (opcode))
4516 {
4517 operand_error_record* record = operand_error_report.head;
4518 gas_assert (record != NULL && operand_error_report.tail != NULL);
4519 operand_error_report.head = record->next;
4520 record->next = free_opnd_error_record_nodes;
4521 free_opnd_error_record_nodes = record;
4522 if (operand_error_report.head == NULL)
4523 {
4524 gas_assert (operand_error_report.tail == record);
4525 operand_error_report.tail = NULL;
4526 }
4527 }
4528 }
4529
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes have much fewer patterns in the list; an empty
	 sequence marks the end of the used entries.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count agreeing operand qualifiers; note QUALIFIERS is advanced
	 in step with J across the sequence.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Keep the first sequence with the highest match count.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
4579
4580 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4581 corresponding operands in *INSTR. */
4582
4583 static inline void
4584 assign_qualifier_sequence (aarch64_inst *instr,
4585 const aarch64_opnd_qualifier_t *qualifiers)
4586 {
4587 int i = 0;
4588 int num_opnds = aarch64_num_of_operands (instr->opcode);
4589 gas_assert (num_opnds);
4590 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4591 instr->operands[i].qualifier = *qualifiers;
4592 }
4593
4594 /* Print operands for the diagnosis purpose. */
4595
4596 static void
4597 print_operands (char *buf, const aarch64_opcode *opcode,
4598 const aarch64_opnd_info *opnds)
4599 {
4600 int i;
4601
4602 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4603 {
4604 char str[128];
4605
4606 /* We regard the opcode operand info more, however we also look into
4607 the inst->operands to support the disassembling of the optional
4608 operand.
4609 The two operand code should be the same in all cases, apart from
4610 when the operand can be optional. */
4611 if (opcode->operands[i] == AARCH64_OPND_NIL
4612 || opnds[i].type == AARCH64_OPND_NIL)
4613 break;
4614
4615 /* Generate the operand string in STR. */
4616 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4617 NULL);
4618
4619 /* Delimiter. */
4620 if (str[0] != '\0')
4621 strcat (buf, i == 0 ? " " : ", ");
4622
4623 /* Append the operand string. */
4624 strcat (buf, str);
4625 }
4626 }
4627
/* Send to stderr a string as information.  The message is prefixed with
   the current "file:line" location when it is known, then "Info: ", and
   is terminated with a newline.  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  file = as_where (&line);
  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }

  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4651
/* Output one operand error record.

   RECORD describes the mismatch collected for one opcode template and
   STR is the user's assembly line, quoted verbatim in the diagnostics.
   Non-fatal errors are issued via as_warn, everything else via as_bad.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  /* IDX < 0 means the error is not tied to a particular operand.  */
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Pick the diagnostic routine by severity.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parse the operands from the original line; this
	     is expected to succeed since the line parsed before.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* The encode is expected to fail again for this erroneous
	     instruction -- we only need the IR it leaves behind.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the inclusive lower/upper bounds.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4828
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.

   STR is the assembly line being diagnosed; NON_FATAL_ONLY, when TRUE,
   restricts reporting to records marked non-fatal.  */

static void
output_operand_error_report (char *str, bfd_boolean non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
4926 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Store the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
4937
/* Read a little-endian AARCH64 instruction from BUF.

   Each byte is widened to uint32_t before shifting: in the original code
   `where[3] << 24' shifted a promoted (signed) int, which is undefined
   behaviour when the top byte is >= 0x80 (C11 6.5.7).  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *where = (unsigned char *) buf;
  uint32_t result;
  result = ((uint32_t) where[0]
	    | ((uint32_t) where[1] << 8)
	    | ((uint32_t) where[2] << 16)
	    | ((uint32_t) where[3] << 24));
  return result;
}
4946
/* Emit the instruction currently held in the global INST: reserve
   INSN_SIZE bytes in the current frag, write the little-endian encoding,
   and, if a relocation is pending, create the corresponding fixup.
   NEW_INST, when non-NULL, is recorded on the fixup (tc_fix_data.inst)
   for later use when the fix is applied.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  to = frag_more (INSN_SIZE);

  /* Mark this frag as containing instructions.  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand code and flags so the
	     back-end can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
4980
/* Link together opcodes of the same name.  */

struct templates
{
  aarch64_opcode *opcode;	/* One opcode entry for this mnemonic.  */
  struct templates *next;	/* Next opcode sharing the same name.  */
};

typedef struct templates templates;
4990
4991 static templates *
4992 lookup_mnemonic (const char *start, int len)
4993 {
4994 templates *templ = NULL;
4995
4996 templ = hash_find_n (aarch64_ops_hsh, start, len);
4997 return templ;
4998 }
4999
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.

   On success, *STR is advanced past the mnemonic (and any ".cond"
   suffix) and the matching template chain is returned; on failure 0 is
   returned and *STR is left at the point where parsing stopped.  As a
   side effect, inst.cond is set to the parsed condition code, or
   COND_ALWAYS when no suffix is present.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  DOT remembers the first '.' seen, the
     potential start of a condition suffix.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Reject an empty mnemonic or one that begins with '.'.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown condition: stop at the '.' so the caller can report
	     a sensible error position.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* LEN <= 13 guarantees LEN + 2 <= 15 fits in CONDNAME[16]; no NUL
	 terminator is needed because lookup_mnemonic takes an explicit
	 length.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5063
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL
   (after recording a syntax error via first_error) on failure.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base (smallest-width) vector qualifier for each element type; the
     final qualifier is computed as an offset from these below.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predicate register qualifiers: /z (zeroing) and /m (merging).  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Vector element register.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 128-, 64- and 32-bit total vector sizes are representable.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector width
	 by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5134
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* The encoding an omitted operand defaults to, taken from the opcode
     table entry.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Indexed-element register operands.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* An omitted "MUL #n" multiplier defaults to MUL #1.  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Option-table operands store a pointer into the relevant table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5232
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   Validates inst.reloc.type against the opcode (MOVK restrictions,
   32-bit register restrictions) and sets the shift amount implied by
   the relocation on operand 1.  */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* Signed / group relocations are meaningless on MOVK, which only
     replaces 16 bits.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  /* Derive the implicit LSL amount from the "group" of the relocation:
     G0 -> 0, G1 -> 16, G2 -> 32, G3 -> 48.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Shifts of 32 or 48 cannot apply to a 32-bit destination.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5334
/* A primitive log calculator.  Return log2 of SIZE, which must be a
   power of two in [1, 16]; assert and return -1 otherwise.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* ls[SIZE - 1] is log2 (SIZE); (unsigned char) -1 marks non-powers.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as oversized values: indexing ls[SIZE - 1]
     with SIZE == 0 would read out of bounds.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5350
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The real reloc is selected by two keys: which LO12 pseudo reloc was
   parsed (the table row) and the transfer size implied by operand 0's
   qualifier (the table column, log2 of the element size in bytes).  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: plain LO12, TLSLD DTPREL, TLSLD DTPREL_NC, TLSLE TPREL,
     TLSLE TPREL_NC.  Columns: 8-, 16-, 32-, 64-, 128-bit accesses
     (the TLS variants have no 128-bit form).  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1's qualifier was not parsed, infer it from operand 0's.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5429
5430 /* Check whether a register list REGINFO is valid. The registers must be
5431 numbered in increasing order (modulo 32), in increments of one or two.
5432
5433 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5434 increments of two.
5435
5436 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5437
5438 static bfd_boolean
5439 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5440 {
5441 uint32_t i, nb_regs, prev_regno, incr;
5442
5443 nb_regs = 1 + (reginfo & 0x3);
5444 reginfo >>= 2;
5445 prev_regno = reginfo & 0x1f;
5446 incr = accept_alternate ? 2 : 1;
5447
5448 for (i = 1; i < nb_regs; ++i)
5449 {
5450 uint32_t curr_regno;
5451 reginfo >>= 5;
5452 curr_regno = reginfo & 0x1f;
5453 if (curr_regno != ((prev_regno + incr) & 0x1f))
5454 return FALSE;
5455 prev_regno = curr_regno;
5456 }
5457
5458 return TRUE;
5459 }
5460
5461 /* Generic instruction operand parser. This does no encoding and no
5462 semantic validation; it merely squirrels values away in the inst
5463 structure. Returns TRUE or FALSE depending on whether the
5464 specified grammar matched. */
5465
5466 static bfd_boolean
5467 parse_operands (char *str, const aarch64_opcode *opcode)
5468 {
5469 int i;
5470 char *backtrack_pos = 0;
5471 const enum aarch64_opnd *operands = opcode->operands;
5472 aarch64_reg_type imm_reg_type;
5473
5474 clear_error ();
5475 skip_whitespace (str);
5476
5477 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5478 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5479 else
5480 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5481
5482 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5483 {
5484 int64_t val;
5485 const reg_entry *reg;
5486 int comma_skipped_p = 0;
5487 aarch64_reg_type rtype;
5488 struct vector_type_el vectype;
5489 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5490 aarch64_opnd_info *info = &inst.base.operands[i];
5491 aarch64_reg_type reg_type;
5492
5493 DEBUG_TRACE ("parse operand %d", i);
5494
5495 /* Assign the operand code. */
5496 info->type = operands[i];
5497
5498 if (optional_operand_p (opcode, i))
5499 {
5500 /* Remember where we are in case we need to backtrack. */
5501 gas_assert (!backtrack_pos);
5502 backtrack_pos = str;
5503 }
5504
5505 /* Expect comma between operands; the backtrack mechanism will take
5506 care of cases of omitted optional operand. */
5507 if (i > 0 && ! skip_past_char (&str, ','))
5508 {
5509 set_syntax_error (_("comma expected between operands"));
5510 goto failure;
5511 }
5512 else
5513 comma_skipped_p = 1;
5514
5515 switch (operands[i])
5516 {
5517 case AARCH64_OPND_Rd:
5518 case AARCH64_OPND_Rn:
5519 case AARCH64_OPND_Rm:
5520 case AARCH64_OPND_Rt:
5521 case AARCH64_OPND_Rt2:
5522 case AARCH64_OPND_Rs:
5523 case AARCH64_OPND_Ra:
5524 case AARCH64_OPND_Rt_SYS:
5525 case AARCH64_OPND_PAIRREG:
5526 case AARCH64_OPND_SVE_Rm:
5527 po_int_reg_or_fail (REG_TYPE_R_Z);
5528 break;
5529
5530 case AARCH64_OPND_Rd_SP:
5531 case AARCH64_OPND_Rn_SP:
5532 case AARCH64_OPND_Rt_SP:
5533 case AARCH64_OPND_SVE_Rn_SP:
5534 case AARCH64_OPND_Rm_SP:
5535 po_int_reg_or_fail (REG_TYPE_R_SP);
5536 break;
5537
5538 case AARCH64_OPND_Rm_EXT:
5539 case AARCH64_OPND_Rm_SFT:
5540 po_misc_or_fail (parse_shifter_operand
5541 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5542 ? SHIFTED_ARITH_IMM
5543 : SHIFTED_LOGIC_IMM)));
5544 if (!info->shifter.operator_present)
5545 {
5546 /* Default to LSL if not present. Libopcodes prefers shifter
5547 kind to be explicit. */
5548 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5549 info->shifter.kind = AARCH64_MOD_LSL;
5550 /* For Rm_EXT, libopcodes will carry out further check on whether
5551 or not stack pointer is used in the instruction (Recall that
5552 "the extend operator is not optional unless at least one of
5553 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5554 }
5555 break;
5556
5557 case AARCH64_OPND_Fd:
5558 case AARCH64_OPND_Fn:
5559 case AARCH64_OPND_Fm:
5560 case AARCH64_OPND_Fa:
5561 case AARCH64_OPND_Ft:
5562 case AARCH64_OPND_Ft2:
5563 case AARCH64_OPND_Sd:
5564 case AARCH64_OPND_Sn:
5565 case AARCH64_OPND_Sm:
5566 case AARCH64_OPND_SVE_VZn:
5567 case AARCH64_OPND_SVE_Vd:
5568 case AARCH64_OPND_SVE_Vm:
5569 case AARCH64_OPND_SVE_Vn:
5570 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5571 if (val == PARSE_FAIL)
5572 {
5573 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5574 goto failure;
5575 }
5576 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5577
5578 info->reg.regno = val;
5579 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5580 break;
5581
5582 case AARCH64_OPND_SVE_Pd:
5583 case AARCH64_OPND_SVE_Pg3:
5584 case AARCH64_OPND_SVE_Pg4_5:
5585 case AARCH64_OPND_SVE_Pg4_10:
5586 case AARCH64_OPND_SVE_Pg4_16:
5587 case AARCH64_OPND_SVE_Pm:
5588 case AARCH64_OPND_SVE_Pn:
5589 case AARCH64_OPND_SVE_Pt:
5590 reg_type = REG_TYPE_PN;
5591 goto vector_reg;
5592
5593 case AARCH64_OPND_SVE_Za_5:
5594 case AARCH64_OPND_SVE_Za_16:
5595 case AARCH64_OPND_SVE_Zd:
5596 case AARCH64_OPND_SVE_Zm_5:
5597 case AARCH64_OPND_SVE_Zm_16:
5598 case AARCH64_OPND_SVE_Zn:
5599 case AARCH64_OPND_SVE_Zt:
5600 reg_type = REG_TYPE_ZN;
5601 goto vector_reg;
5602
5603 case AARCH64_OPND_Va:
5604 case AARCH64_OPND_Vd:
5605 case AARCH64_OPND_Vn:
5606 case AARCH64_OPND_Vm:
5607 reg_type = REG_TYPE_VN;
5608 vector_reg:
5609 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5610 if (val == PARSE_FAIL)
5611 {
5612 first_error (_(get_reg_expected_msg (reg_type)));
5613 goto failure;
5614 }
5615 if (vectype.defined & NTA_HASINDEX)
5616 goto failure;
5617
5618 info->reg.regno = val;
5619 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5620 && vectype.type == NT_invtype)
5621 /* Unqualified Pn and Zn registers are allowed in certain
5622 contexts. Rely on F_STRICT qualifier checking to catch
5623 invalid uses. */
5624 info->qualifier = AARCH64_OPND_QLF_NIL;
5625 else
5626 {
5627 info->qualifier = vectype_to_qualifier (&vectype);
5628 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5629 goto failure;
5630 }
5631 break;
5632
5633 case AARCH64_OPND_VdD1:
5634 case AARCH64_OPND_VnD1:
5635 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5636 if (val == PARSE_FAIL)
5637 {
5638 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5639 goto failure;
5640 }
5641 if (vectype.type != NT_d || vectype.index != 1)
5642 {
5643 set_fatal_syntax_error
5644 (_("the top half of a 128-bit FP/SIMD register is expected"));
5645 goto failure;
5646 }
5647 info->reg.regno = val;
5648 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5649 here; it is correct for the purpose of encoding/decoding since
5650 only the register number is explicitly encoded in the related
5651 instructions, although this appears a bit hacky. */
5652 info->qualifier = AARCH64_OPND_QLF_S_D;
5653 break;
5654
5655 case AARCH64_OPND_SVE_Zm3_INDEX:
5656 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5657 case AARCH64_OPND_SVE_Zm3_11_INDEX:
5658 case AARCH64_OPND_SVE_Zm4_11_INDEX:
5659 case AARCH64_OPND_SVE_Zm4_INDEX:
5660 case AARCH64_OPND_SVE_Zn_INDEX:
5661 reg_type = REG_TYPE_ZN;
5662 goto vector_reg_index;
5663
5664 case AARCH64_OPND_Ed:
5665 case AARCH64_OPND_En:
5666 case AARCH64_OPND_Em:
5667 case AARCH64_OPND_Em16:
5668 case AARCH64_OPND_SM3_IMM2:
5669 reg_type = REG_TYPE_VN;
5670 vector_reg_index:
5671 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5672 if (val == PARSE_FAIL)
5673 {
5674 first_error (_(get_reg_expected_msg (reg_type)));
5675 goto failure;
5676 }
5677 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5678 goto failure;
5679
5680 info->reglane.regno = val;
5681 info->reglane.index = vectype.index;
5682 info->qualifier = vectype_to_qualifier (&vectype);
5683 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5684 goto failure;
5685 break;
5686
5687 case AARCH64_OPND_SVE_ZnxN:
5688 case AARCH64_OPND_SVE_ZtxN:
5689 reg_type = REG_TYPE_ZN;
5690 goto vector_reg_list;
5691
5692 case AARCH64_OPND_LVn:
5693 case AARCH64_OPND_LVt:
5694 case AARCH64_OPND_LVt_AL:
5695 case AARCH64_OPND_LEt:
5696 reg_type = REG_TYPE_VN;
5697 vector_reg_list:
5698 if (reg_type == REG_TYPE_ZN
5699 && get_opcode_dependent_value (opcode) == 1
5700 && *str != '{')
5701 {
5702 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5703 if (val == PARSE_FAIL)
5704 {
5705 first_error (_(get_reg_expected_msg (reg_type)));
5706 goto failure;
5707 }
5708 info->reglist.first_regno = val;
5709 info->reglist.num_regs = 1;
5710 }
5711 else
5712 {
5713 val = parse_vector_reg_list (&str, reg_type, &vectype);
5714 if (val == PARSE_FAIL)
5715 goto failure;
5716 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5717 {
5718 set_fatal_syntax_error (_("invalid register list"));
5719 goto failure;
5720 }
5721 info->reglist.first_regno = (val >> 2) & 0x1f;
5722 info->reglist.num_regs = (val & 0x3) + 1;
5723 }
5724 if (operands[i] == AARCH64_OPND_LEt)
5725 {
5726 if (!(vectype.defined & NTA_HASINDEX))
5727 goto failure;
5728 info->reglist.has_index = 1;
5729 info->reglist.index = vectype.index;
5730 }
5731 else
5732 {
5733 if (vectype.defined & NTA_HASINDEX)
5734 goto failure;
5735 if (!(vectype.defined & NTA_HASTYPE))
5736 {
5737 if (reg_type == REG_TYPE_ZN)
5738 set_fatal_syntax_error (_("missing type suffix"));
5739 goto failure;
5740 }
5741 }
5742 info->qualifier = vectype_to_qualifier (&vectype);
5743 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5744 goto failure;
5745 break;
5746
5747 case AARCH64_OPND_CRn:
5748 case AARCH64_OPND_CRm:
5749 {
5750 char prefix = *(str++);
5751 if (prefix != 'c' && prefix != 'C')
5752 goto failure;
5753
5754 po_imm_nc_or_fail ();
5755 if (val > 15)
5756 {
5757 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5758 goto failure;
5759 }
5760 info->qualifier = AARCH64_OPND_QLF_CR;
5761 info->imm.value = val;
5762 break;
5763 }
5764
5765 case AARCH64_OPND_SHLL_IMM:
5766 case AARCH64_OPND_IMM_VLSR:
5767 po_imm_or_fail (1, 64);
5768 info->imm.value = val;
5769 break;
5770
5771 case AARCH64_OPND_CCMP_IMM:
5772 case AARCH64_OPND_SIMM5:
5773 case AARCH64_OPND_FBITS:
5774 case AARCH64_OPND_TME_UIMM16:
5775 case AARCH64_OPND_UIMM4:
5776 case AARCH64_OPND_UIMM4_ADDG:
5777 case AARCH64_OPND_UIMM10:
5778 case AARCH64_OPND_UIMM3_OP1:
5779 case AARCH64_OPND_UIMM3_OP2:
5780 case AARCH64_OPND_IMM_VLSL:
5781 case AARCH64_OPND_IMM:
5782 case AARCH64_OPND_IMM_2:
5783 case AARCH64_OPND_WIDTH:
5784 case AARCH64_OPND_SVE_INV_LIMM:
5785 case AARCH64_OPND_SVE_LIMM:
5786 case AARCH64_OPND_SVE_LIMM_MOV:
5787 case AARCH64_OPND_SVE_SHLIMM_PRED:
5788 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5789 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
5790 case AARCH64_OPND_SVE_SHRIMM_PRED:
5791 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5792 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
5793 case AARCH64_OPND_SVE_SIMM5:
5794 case AARCH64_OPND_SVE_SIMM5B:
5795 case AARCH64_OPND_SVE_SIMM6:
5796 case AARCH64_OPND_SVE_SIMM8:
5797 case AARCH64_OPND_SVE_UIMM3:
5798 case AARCH64_OPND_SVE_UIMM7:
5799 case AARCH64_OPND_SVE_UIMM8:
5800 case AARCH64_OPND_SVE_UIMM8_53:
5801 case AARCH64_OPND_IMM_ROT1:
5802 case AARCH64_OPND_IMM_ROT2:
5803 case AARCH64_OPND_IMM_ROT3:
5804 case AARCH64_OPND_SVE_IMM_ROT1:
5805 case AARCH64_OPND_SVE_IMM_ROT2:
5806 case AARCH64_OPND_SVE_IMM_ROT3:
5807 po_imm_nc_or_fail ();
5808 info->imm.value = val;
5809 break;
5810
5811 case AARCH64_OPND_SVE_AIMM:
5812 case AARCH64_OPND_SVE_ASIMM:
5813 po_imm_nc_or_fail ();
5814 info->imm.value = val;
5815 skip_whitespace (str);
5816 if (skip_past_comma (&str))
5817 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5818 else
5819 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5820 break;
5821
5822 case AARCH64_OPND_SVE_PATTERN:
5823 po_enum_or_fail (aarch64_sve_pattern_array);
5824 info->imm.value = val;
5825 break;
5826
5827 case AARCH64_OPND_SVE_PATTERN_SCALED:
5828 po_enum_or_fail (aarch64_sve_pattern_array);
5829 info->imm.value = val;
5830 if (skip_past_comma (&str)
5831 && !parse_shift (&str, info, SHIFTED_MUL))
5832 goto failure;
5833 if (!info->shifter.operator_present)
5834 {
5835 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5836 info->shifter.kind = AARCH64_MOD_MUL;
5837 info->shifter.amount = 1;
5838 }
5839 break;
5840
5841 case AARCH64_OPND_SVE_PRFOP:
5842 po_enum_or_fail (aarch64_sve_prfop_array);
5843 info->imm.value = val;
5844 break;
5845
5846 case AARCH64_OPND_UIMM7:
5847 po_imm_or_fail (0, 127);
5848 info->imm.value = val;
5849 break;
5850
5851 case AARCH64_OPND_IDX:
5852 case AARCH64_OPND_MASK:
5853 case AARCH64_OPND_BIT_NUM:
5854 case AARCH64_OPND_IMMR:
5855 case AARCH64_OPND_IMMS:
5856 po_imm_or_fail (0, 63);
5857 info->imm.value = val;
5858 break;
5859
5860 case AARCH64_OPND_IMM0:
5861 po_imm_nc_or_fail ();
5862 if (val != 0)
5863 {
5864 set_fatal_syntax_error (_("immediate zero expected"));
5865 goto failure;
5866 }
5867 info->imm.value = 0;
5868 break;
5869
5870 case AARCH64_OPND_FPIMM0:
5871 {
5872 int qfloat;
5873 bfd_boolean res1 = FALSE, res2 = FALSE;
5874 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5875 it is probably not worth the effort to support it. */
5876 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5877 imm_reg_type))
5878 && (error_p ()
5879 || !(res2 = parse_constant_immediate (&str, &val,
5880 imm_reg_type))))
5881 goto failure;
5882 if ((res1 && qfloat == 0) || (res2 && val == 0))
5883 {
5884 info->imm.value = 0;
5885 info->imm.is_fp = 1;
5886 break;
5887 }
5888 set_fatal_syntax_error (_("immediate zero expected"));
5889 goto failure;
5890 }
5891
5892 case AARCH64_OPND_IMM_MOV:
5893 {
5894 char *saved = str;
5895 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5896 reg_name_p (str, REG_TYPE_VN))
5897 goto failure;
5898 str = saved;
5899 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5900 GE_OPT_PREFIX, 1));
5901 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5902 later. fix_mov_imm_insn will try to determine a machine
5903 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5904 message if the immediate cannot be moved by a single
5905 instruction. */
5906 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5907 inst.base.operands[i].skip = 1;
5908 }
5909 break;
5910
5911 case AARCH64_OPND_SIMD_IMM:
5912 case AARCH64_OPND_SIMD_IMM_SFT:
5913 if (! parse_big_immediate (&str, &val, imm_reg_type))
5914 goto failure;
5915 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5916 /* addr_off_p */ 0,
5917 /* need_libopcodes_p */ 1,
5918 /* skip_p */ 1);
5919 /* Parse shift.
5920 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5921 shift, we don't check it here; we leave the checking to
5922 the libopcodes (operand_general_constraint_met_p). By
5923 doing this, we achieve better diagnostics. */
5924 if (skip_past_comma (&str)
5925 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5926 goto failure;
5927 if (!info->shifter.operator_present
5928 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5929 {
5930 /* Default to LSL if not present. Libopcodes prefers shifter
5931 kind to be explicit. */
5932 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5933 info->shifter.kind = AARCH64_MOD_LSL;
5934 }
5935 break;
5936
5937 case AARCH64_OPND_FPIMM:
5938 case AARCH64_OPND_SIMD_FPIMM:
5939 case AARCH64_OPND_SVE_FPIMM8:
5940 {
5941 int qfloat;
5942 bfd_boolean dp_p;
5943
5944 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5945 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5946 || !aarch64_imm_float_p (qfloat))
5947 {
5948 if (!error_p ())
5949 set_fatal_syntax_error (_("invalid floating-point"
5950 " constant"));
5951 goto failure;
5952 }
5953 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5954 inst.base.operands[i].imm.is_fp = 1;
5955 }
5956 break;
5957
5958 case AARCH64_OPND_SVE_I1_HALF_ONE:
5959 case AARCH64_OPND_SVE_I1_HALF_TWO:
5960 case AARCH64_OPND_SVE_I1_ZERO_ONE:
5961 {
5962 int qfloat;
5963 bfd_boolean dp_p;
5964
5965 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5966 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5967 {
5968 if (!error_p ())
5969 set_fatal_syntax_error (_("invalid floating-point"
5970 " constant"));
5971 goto failure;
5972 }
5973 inst.base.operands[i].imm.value = qfloat;
5974 inst.base.operands[i].imm.is_fp = 1;
5975 }
5976 break;
5977
5978 case AARCH64_OPND_LIMM:
5979 po_misc_or_fail (parse_shifter_operand (&str, info,
5980 SHIFTED_LOGIC_IMM));
5981 if (info->shifter.operator_present)
5982 {
5983 set_fatal_syntax_error
5984 (_("shift not allowed for bitmask immediate"));
5985 goto failure;
5986 }
5987 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5988 /* addr_off_p */ 0,
5989 /* need_libopcodes_p */ 1,
5990 /* skip_p */ 1);
5991 break;
5992
5993 case AARCH64_OPND_AIMM:
5994 if (opcode->op == OP_ADD)
5995 /* ADD may have relocation types. */
5996 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5997 SHIFTED_ARITH_IMM));
5998 else
5999 po_misc_or_fail (parse_shifter_operand (&str, info,
6000 SHIFTED_ARITH_IMM));
6001 switch (inst.reloc.type)
6002 {
6003 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6004 info->shifter.amount = 12;
6005 break;
6006 case BFD_RELOC_UNUSED:
6007 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6008 if (info->shifter.kind != AARCH64_MOD_NONE)
6009 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6010 inst.reloc.pc_rel = 0;
6011 break;
6012 default:
6013 break;
6014 }
6015 info->imm.value = 0;
6016 if (!info->shifter.operator_present)
6017 {
6018 /* Default to LSL if not present. Libopcodes prefers shifter
6019 kind to be explicit. */
6020 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6021 info->shifter.kind = AARCH64_MOD_LSL;
6022 }
6023 break;
6024
6025 case AARCH64_OPND_HALF:
6026 {
6027 /* #<imm16> or relocation. */
6028 int internal_fixup_p;
6029 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6030 if (internal_fixup_p)
6031 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6032 skip_whitespace (str);
6033 if (skip_past_comma (&str))
6034 {
6035 /* {, LSL #<shift>} */
6036 if (! aarch64_gas_internal_fixup_p ())
6037 {
6038 set_fatal_syntax_error (_("can't mix relocation modifier "
6039 "with explicit shift"));
6040 goto failure;
6041 }
6042 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6043 }
6044 else
6045 inst.base.operands[i].shifter.amount = 0;
6046 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6047 inst.base.operands[i].imm.value = 0;
6048 if (! process_movw_reloc_info ())
6049 goto failure;
6050 }
6051 break;
6052
6053 case AARCH64_OPND_EXCEPTION:
6054 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6055 imm_reg_type));
6056 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6057 /* addr_off_p */ 0,
6058 /* need_libopcodes_p */ 0,
6059 /* skip_p */ 1);
6060 break;
6061
6062 case AARCH64_OPND_NZCV:
6063 {
6064 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
6065 if (nzcv != NULL)
6066 {
6067 str += 4;
6068 info->imm.value = nzcv->value;
6069 break;
6070 }
6071 po_imm_or_fail (0, 15);
6072 info->imm.value = val;
6073 }
6074 break;
6075
6076 case AARCH64_OPND_COND:
6077 case AARCH64_OPND_COND1:
6078 {
6079 char *start = str;
6080 do
6081 str++;
6082 while (ISALPHA (*str));
6083 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
6084 if (info->cond == NULL)
6085 {
6086 set_syntax_error (_("invalid condition"));
6087 goto failure;
6088 }
6089 else if (operands[i] == AARCH64_OPND_COND1
6090 && (info->cond->value & 0xe) == 0xe)
6091 {
6092 /* Do not allow AL or NV. */
6093 set_default_error ();
6094 goto failure;
6095 }
6096 }
6097 break;
6098
6099 case AARCH64_OPND_ADDR_ADRP:
6100 po_misc_or_fail (parse_adrp (&str));
6101 /* Clear the value as operand needs to be relocated. */
6102 info->imm.value = 0;
6103 break;
6104
6105 case AARCH64_OPND_ADDR_PCREL14:
6106 case AARCH64_OPND_ADDR_PCREL19:
6107 case AARCH64_OPND_ADDR_PCREL21:
6108 case AARCH64_OPND_ADDR_PCREL26:
6109 po_misc_or_fail (parse_address (&str, info));
6110 if (!info->addr.pcrel)
6111 {
6112 set_syntax_error (_("invalid pc-relative address"));
6113 goto failure;
6114 }
6115 if (inst.gen_lit_pool
6116 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6117 {
6118 /* Only permit "=value" in the literal load instructions.
6119 The literal will be generated by programmer_friendly_fixup. */
6120 set_syntax_error (_("invalid use of \"=immediate\""));
6121 goto failure;
6122 }
6123 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6124 {
6125 set_syntax_error (_("unrecognized relocation suffix"));
6126 goto failure;
6127 }
6128 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6129 {
6130 info->imm.value = inst.reloc.exp.X_add_number;
6131 inst.reloc.type = BFD_RELOC_UNUSED;
6132 }
6133 else
6134 {
6135 info->imm.value = 0;
6136 if (inst.reloc.type == BFD_RELOC_UNUSED)
6137 switch (opcode->iclass)
6138 {
6139 case compbranch:
6140 case condbranch:
6141 /* e.g. CBZ or B.COND */
6142 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6143 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6144 break;
6145 case testbranch:
6146 /* e.g. TBZ */
6147 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6148 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6149 break;
6150 case branch_imm:
6151 /* e.g. B or BL */
6152 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6153 inst.reloc.type =
6154 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6155 : BFD_RELOC_AARCH64_JUMP26;
6156 break;
6157 case loadlit:
6158 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6159 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6160 break;
6161 case pcreladdr:
6162 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6163 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6164 break;
6165 default:
6166 gas_assert (0);
6167 abort ();
6168 }
6169 inst.reloc.pc_rel = 1;
6170 }
6171 break;
6172
6173 case AARCH64_OPND_ADDR_SIMPLE:
6174 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6175 {
6176 /* [<Xn|SP>{, #<simm>}] */
6177 char *start = str;
6178 /* First use the normal address-parsing routines, to get
6179 the usual syntax errors. */
6180 po_misc_or_fail (parse_address (&str, info));
6181 if (info->addr.pcrel || info->addr.offset.is_reg
6182 || !info->addr.preind || info->addr.postind
6183 || info->addr.writeback)
6184 {
6185 set_syntax_error (_("invalid addressing mode"));
6186 goto failure;
6187 }
6188
6189 /* Then retry, matching the specific syntax of these addresses. */
6190 str = start;
6191 po_char_or_fail ('[');
6192 po_reg_or_fail (REG_TYPE_R64_SP);
6193 /* Accept optional ", #0". */
6194 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6195 && skip_past_char (&str, ','))
6196 {
6197 skip_past_char (&str, '#');
6198 if (! skip_past_char (&str, '0'))
6199 {
6200 set_fatal_syntax_error
6201 (_("the optional immediate offset can only be 0"));
6202 goto failure;
6203 }
6204 }
6205 po_char_or_fail (']');
6206 break;
6207 }
6208
6209 case AARCH64_OPND_ADDR_REGOFF:
6210 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6211 po_misc_or_fail (parse_address (&str, info));
6212 regoff_addr:
6213 if (info->addr.pcrel || !info->addr.offset.is_reg
6214 || !info->addr.preind || info->addr.postind
6215 || info->addr.writeback)
6216 {
6217 set_syntax_error (_("invalid addressing mode"));
6218 goto failure;
6219 }
6220 if (!info->shifter.operator_present)
6221 {
6222 /* Default to LSL if not present. Libopcodes prefers shifter
6223 kind to be explicit. */
6224 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6225 info->shifter.kind = AARCH64_MOD_LSL;
6226 }
6227 /* Qualifier to be deduced by libopcodes. */
6228 break;
6229
6230 case AARCH64_OPND_ADDR_SIMM7:
6231 po_misc_or_fail (parse_address (&str, info));
6232 if (info->addr.pcrel || info->addr.offset.is_reg
6233 || (!info->addr.preind && !info->addr.postind))
6234 {
6235 set_syntax_error (_("invalid addressing mode"));
6236 goto failure;
6237 }
6238 if (inst.reloc.type != BFD_RELOC_UNUSED)
6239 {
6240 set_syntax_error (_("relocation not allowed"));
6241 goto failure;
6242 }
6243 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6244 /* addr_off_p */ 1,
6245 /* need_libopcodes_p */ 1,
6246 /* skip_p */ 0);
6247 break;
6248
6249 case AARCH64_OPND_ADDR_SIMM9:
6250 case AARCH64_OPND_ADDR_SIMM9_2:
6251 case AARCH64_OPND_ADDR_SIMM11:
6252 case AARCH64_OPND_ADDR_SIMM13:
6253 po_misc_or_fail (parse_address (&str, info));
6254 if (info->addr.pcrel || info->addr.offset.is_reg
6255 || (!info->addr.preind && !info->addr.postind)
6256 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6257 && info->addr.writeback))
6258 {
6259 set_syntax_error (_("invalid addressing mode"));
6260 goto failure;
6261 }
6262 if (inst.reloc.type != BFD_RELOC_UNUSED)
6263 {
6264 set_syntax_error (_("relocation not allowed"));
6265 goto failure;
6266 }
6267 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6268 /* addr_off_p */ 1,
6269 /* need_libopcodes_p */ 1,
6270 /* skip_p */ 0);
6271 break;
6272
6273 case AARCH64_OPND_ADDR_SIMM10:
6274 case AARCH64_OPND_ADDR_OFFSET:
6275 po_misc_or_fail (parse_address (&str, info));
6276 if (info->addr.pcrel || info->addr.offset.is_reg
6277 || !info->addr.preind || info->addr.postind)
6278 {
6279 set_syntax_error (_("invalid addressing mode"));
6280 goto failure;
6281 }
6282 if (inst.reloc.type != BFD_RELOC_UNUSED)
6283 {
6284 set_syntax_error (_("relocation not allowed"));
6285 goto failure;
6286 }
6287 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6288 /* addr_off_p */ 1,
6289 /* need_libopcodes_p */ 1,
6290 /* skip_p */ 0);
6291 break;
6292
6293 case AARCH64_OPND_ADDR_UIMM12:
6294 po_misc_or_fail (parse_address (&str, info));
6295 if (info->addr.pcrel || info->addr.offset.is_reg
6296 || !info->addr.preind || info->addr.writeback)
6297 {
6298 set_syntax_error (_("invalid addressing mode"));
6299 goto failure;
6300 }
6301 if (inst.reloc.type == BFD_RELOC_UNUSED)
6302 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6303 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6304 || (inst.reloc.type
6305 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6306 || (inst.reloc.type
6307 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6308 || (inst.reloc.type
6309 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6310 || (inst.reloc.type
6311 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6312 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6313 /* Leave qualifier to be determined by libopcodes. */
6314 break;
6315
6316 case AARCH64_OPND_SIMD_ADDR_POST:
6317 /* [<Xn|SP>], <Xm|#<amount>> */
6318 po_misc_or_fail (parse_address (&str, info));
6319 if (!info->addr.postind || !info->addr.writeback)
6320 {
6321 set_syntax_error (_("invalid addressing mode"));
6322 goto failure;
6323 }
6324 if (!info->addr.offset.is_reg)
6325 {
6326 if (inst.reloc.exp.X_op == O_constant)
6327 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6328 else
6329 {
6330 set_fatal_syntax_error
6331 (_("writeback value must be an immediate constant"));
6332 goto failure;
6333 }
6334 }
6335 /* No qualifier. */
6336 break;
6337
6338 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6339 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6340 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6341 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6342 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6343 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6344 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6345 case AARCH64_OPND_SVE_ADDR_RI_U6:
6346 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6347 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6348 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6349 /* [X<n>{, #imm, MUL VL}]
6350 [X<n>{, #imm}]
6351 but recognizing SVE registers. */
6352 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6353 &offset_qualifier));
6354 if (base_qualifier != AARCH64_OPND_QLF_X)
6355 {
6356 set_syntax_error (_("invalid addressing mode"));
6357 goto failure;
6358 }
6359 sve_regimm:
6360 if (info->addr.pcrel || info->addr.offset.is_reg
6361 || !info->addr.preind || info->addr.writeback)
6362 {
6363 set_syntax_error (_("invalid addressing mode"));
6364 goto failure;
6365 }
6366 if (inst.reloc.type != BFD_RELOC_UNUSED
6367 || inst.reloc.exp.X_op != O_constant)
6368 {
6369 /* Make sure this has priority over
6370 "invalid addressing mode". */
6371 set_fatal_syntax_error (_("constant offset required"));
6372 goto failure;
6373 }
6374 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6375 break;
6376
6377 case AARCH64_OPND_SVE_ADDR_R:
6378 /* [<Xn|SP>{, <R><m>}]
6379 but recognizing SVE registers. */
6380 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6381 &offset_qualifier));
6382 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6383 {
6384 offset_qualifier = AARCH64_OPND_QLF_X;
6385 info->addr.offset.is_reg = 1;
6386 info->addr.offset.regno = 31;
6387 }
6388 else if (base_qualifier != AARCH64_OPND_QLF_X
6389 || offset_qualifier != AARCH64_OPND_QLF_X)
6390 {
6391 set_syntax_error (_("invalid addressing mode"));
6392 goto failure;
6393 }
6394 goto regoff_addr;
6395
6396 case AARCH64_OPND_SVE_ADDR_RR:
6397 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6398 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6399 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6400 case AARCH64_OPND_SVE_ADDR_RX:
6401 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6402 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6403 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6404 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6405 but recognizing SVE registers. */
6406 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6407 &offset_qualifier));
6408 if (base_qualifier != AARCH64_OPND_QLF_X
6409 || offset_qualifier != AARCH64_OPND_QLF_X)
6410 {
6411 set_syntax_error (_("invalid addressing mode"));
6412 goto failure;
6413 }
6414 goto regoff_addr;
6415
6416 case AARCH64_OPND_SVE_ADDR_RZ:
6417 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6418 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6419 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6420 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6421 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6422 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6423 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6424 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6425 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6426 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6427 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6428 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6429 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6430 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6431 &offset_qualifier));
6432 if (base_qualifier != AARCH64_OPND_QLF_X
6433 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6434 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6435 {
6436 set_syntax_error (_("invalid addressing mode"));
6437 goto failure;
6438 }
6439 info->qualifier = offset_qualifier;
6440 goto regoff_addr;
6441
6442 case AARCH64_OPND_SVE_ADDR_ZX:
6443 /* [Zn.<T>{, <Xm>}]. */
6444 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6445 &offset_qualifier));
6446 /* Things to check:
6447 base_qualifier either S_S or S_D
6448 offset_qualifier must be X
6449 */
6450 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6451 && base_qualifier != AARCH64_OPND_QLF_S_D)
6452 || offset_qualifier != AARCH64_OPND_QLF_X)
6453 {
6454 set_syntax_error (_("invalid addressing mode"));
6455 goto failure;
6456 }
6457 info->qualifier = base_qualifier;
6458 if (!info->addr.offset.is_reg || info->addr.pcrel
6459 || !info->addr.preind || info->addr.writeback
6460 || info->shifter.operator_present != 0)
6461 {
6462 set_syntax_error (_("invalid addressing mode"));
6463 goto failure;
6464 }
6465 info->shifter.kind = AARCH64_MOD_LSL;
6466 break;
6467
6468
6469 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6470 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6471 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6472 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6473 /* [Z<n>.<T>{, #imm}] */
6474 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6475 &offset_qualifier));
6476 if (base_qualifier != AARCH64_OPND_QLF_S_S
6477 && base_qualifier != AARCH64_OPND_QLF_S_D)
6478 {
6479 set_syntax_error (_("invalid addressing mode"));
6480 goto failure;
6481 }
6482 info->qualifier = base_qualifier;
6483 goto sve_regimm;
6484
6485 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6486 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6487 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6488 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6489 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6490
6491 We don't reject:
6492
6493 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6494
6495 here since we get better error messages by leaving it to
6496 the qualifier checking routines. */
6497 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6498 &offset_qualifier));
6499 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6500 && base_qualifier != AARCH64_OPND_QLF_S_D)
6501 || offset_qualifier != base_qualifier)
6502 {
6503 set_syntax_error (_("invalid addressing mode"));
6504 goto failure;
6505 }
6506 info->qualifier = base_qualifier;
6507 goto regoff_addr;
6508
6509 case AARCH64_OPND_SYSREG:
6510 {
6511 uint32_t sysreg_flags;
6512 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6513 &sysreg_flags)) == PARSE_FAIL)
6514 {
6515 set_syntax_error (_("unknown or missing system register name"));
6516 goto failure;
6517 }
6518 inst.base.operands[i].sysreg.value = val;
6519 inst.base.operands[i].sysreg.flags = sysreg_flags;
6520 break;
6521 }
6522
6523 case AARCH64_OPND_PSTATEFIELD:
6524 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6525 == PARSE_FAIL)
6526 {
6527 set_syntax_error (_("unknown or missing PSTATE field name"));
6528 goto failure;
6529 }
6530 inst.base.operands[i].pstatefield = val;
6531 break;
6532
6533 case AARCH64_OPND_SYSREG_IC:
6534 inst.base.operands[i].sysins_op =
6535 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6536 goto sys_reg_ins;
6537
6538 case AARCH64_OPND_SYSREG_DC:
6539 inst.base.operands[i].sysins_op =
6540 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6541 goto sys_reg_ins;
6542
6543 case AARCH64_OPND_SYSREG_AT:
6544 inst.base.operands[i].sysins_op =
6545 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6546 goto sys_reg_ins;
6547
6548 case AARCH64_OPND_SYSREG_SR:
6549 inst.base.operands[i].sysins_op =
6550 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6551 goto sys_reg_ins;
6552
6553 case AARCH64_OPND_SYSREG_TLBI:
6554 inst.base.operands[i].sysins_op =
6555 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6556 sys_reg_ins:
6557 if (inst.base.operands[i].sysins_op == NULL)
6558 {
6559 set_fatal_syntax_error ( _("unknown or missing operation name"));
6560 goto failure;
6561 }
6562 break;
6563
6564 case AARCH64_OPND_BARRIER:
6565 case AARCH64_OPND_BARRIER_ISB:
6566 val = parse_barrier (&str);
6567 if (val != PARSE_FAIL
6568 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6569 {
6570 /* ISB only accepts options name 'sy'. */
6571 set_syntax_error
6572 (_("the specified option is not accepted in ISB"));
6573 /* Turn off backtrack as this optional operand is present. */
6574 backtrack_pos = 0;
6575 goto failure;
6576 }
6577 /* This is an extension to accept a 0..15 immediate. */
6578 if (val == PARSE_FAIL)
6579 po_imm_or_fail (0, 15);
6580 info->barrier = aarch64_barrier_options + val;
6581 break;
6582
6583 case AARCH64_OPND_PRFOP:
6584 val = parse_pldop (&str);
6585 /* This is an extension to accept a 0..31 immediate. */
6586 if (val == PARSE_FAIL)
6587 po_imm_or_fail (0, 31);
6588 inst.base.operands[i].prfop = aarch64_prfops + val;
6589 break;
6590
6591 case AARCH64_OPND_BARRIER_PSB:
6592 val = parse_barrier_psb (&str, &(info->hint_option));
6593 if (val == PARSE_FAIL)
6594 goto failure;
6595 break;
6596
6597 case AARCH64_OPND_BTI_TARGET:
6598 val = parse_bti_operand (&str, &(info->hint_option));
6599 if (val == PARSE_FAIL)
6600 goto failure;
6601 break;
6602
6603 default:
6604 as_fatal (_("unhandled operand code %d"), operands[i]);
6605 }
6606
6607 /* If we get here, this operand was successfully parsed. */
6608 inst.base.operands[i].present = 1;
6609 continue;
6610
6611 failure:
6612 /* The parse routine should already have set the error, but in case
6613 not, set a default one here. */
6614 if (! error_p ())
6615 set_default_error ();
6616
6617 if (! backtrack_pos)
6618 goto parse_operands_return;
6619
6620 {
6621 /* We reach here because this operand is marked as optional, and
6622 either no operand was supplied or the operand was supplied but it
6623 was syntactically incorrect. In the latter case we report an
6624 error. In the former case we perform a few more checks before
6625 dropping through to the code to insert the default operand. */
6626
6627 char *tmp = backtrack_pos;
6628 char endchar = END_OF_INSN;
6629
6630 if (i != (aarch64_num_of_operands (opcode) - 1))
6631 endchar = ',';
6632 skip_past_char (&tmp, ',');
6633
6634 if (*tmp != endchar)
6635 /* The user has supplied an operand in the wrong format. */
6636 goto parse_operands_return;
6637
6638 /* Make sure there is not a comma before the optional operand.
6639 For example the fifth operand of 'sys' is optional:
6640
6641 sys #0,c0,c0,#0, <--- wrong
6642 sys #0,c0,c0,#0 <--- correct. */
6643 if (comma_skipped_p && i && endchar == END_OF_INSN)
6644 {
6645 set_fatal_syntax_error
6646 (_("unexpected comma before the omitted optional operand"));
6647 goto parse_operands_return;
6648 }
6649 }
6650
6651 /* Reaching here means we are dealing with an optional operand that is
6652 omitted from the assembly line. */
6653 gas_assert (optional_operand_p (opcode, i));
6654 info->present = 0;
6655 process_omitted_operand (operands[i], opcode, i, info);
6656
6657 /* Try again, skipping the optional operand at backtrack_pos. */
6658 str = backtrack_pos;
6659 backtrack_pos = 0;
6660
6661 /* Clear any error record after the omitted optional operand has been
6662 successfully handled. */
6663 clear_error ();
6664 }
6665
6666 /* Check if we have parsed all the operands. */
6667 if (*str != '\0' && ! error_p ())
6668 {
6669 /* Set I to the index of the last present operand; this is
6670 for the purpose of diagnostics. */
6671 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6672 ;
6673 set_fatal_syntax_error
6674 (_("unexpected characters following instruction"));
6675 }
6676
6677 parse_operands_return:
6678
6679 if (error_p ())
6680 {
6681 DEBUG_TRACE ("parsing FAIL: %s - %s",
6682 operand_mismatch_kind_names[get_error_kind ()],
6683 get_error_message ());
6684 /* Record the operand error properly; this is useful when there
6685 are multiple instruction templates for a mnemonic name, so that
6686 later on, we can select the error that most closely describes
6687 the problem. */
6688 record_operand_error (opcode, i, get_error_kind (),
6689 get_error_message ());
6690 return FALSE;
6691 }
6692 else
6693 {
6694 DEBUG_TRACE ("parsing SUCCESS");
6695 return TRUE;
6696 }
6697 }
6698
/* It does some fix-up to provide some programmer friendly feature while
   keeping the libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* A W register with a bit number below 32 is legal; switch to the
	     X qualifier, which is the form libopcodes encodes.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  /* Pool slot size follows the destination register size, except
	     LDRSW which always loads a 32-bit literal.  */
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6806
6807 /* Check for loads and stores that will cause unpredictable behavior. */
6808
6809 static void
6810 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6811 {
6812 aarch64_inst *base = &instr->base;
6813 const aarch64_opcode *opcode = base->opcode;
6814 const aarch64_opnd_info *opnds = base->operands;
6815 switch (opcode->iclass)
6816 {
6817 case ldst_pos:
6818 case ldst_imm9:
6819 case ldst_imm10:
6820 case ldst_unscaled:
6821 case ldst_unpriv:
6822 /* Loading/storing the base register is unpredictable if writeback. */
6823 if ((aarch64_get_operand_class (opnds[0].type)
6824 == AARCH64_OPND_CLASS_INT_REG)
6825 && opnds[0].reg.regno == opnds[1].addr.base_regno
6826 && opnds[1].addr.base_regno != REG_SP
6827 /* Exempt STG/STZG/ST2G/STZ2G. */
6828 && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
6829 && opnds[1].addr.writeback)
6830 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6831 break;
6832
6833 case ldstpair_off:
6834 case ldstnapair_offs:
6835 case ldstpair_indexed:
6836 /* Loading/storing the base register is unpredictable if writeback. */
6837 if ((aarch64_get_operand_class (opnds[0].type)
6838 == AARCH64_OPND_CLASS_INT_REG)
6839 && (opnds[0].reg.regno == opnds[2].addr.base_regno
6840 || opnds[1].reg.regno == opnds[2].addr.base_regno)
6841 && opnds[2].addr.base_regno != REG_SP
6842 /* Exempt STGP. */
6843 && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
6844 && opnds[2].addr.writeback)
6845 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6846 /* Load operations must load different registers. */
6847 if ((opcode->opcode & (1 << 22))
6848 && opnds[0].reg.regno == opnds[1].reg.regno)
6849 as_warn (_("unpredictable load of register pair -- `%s'"), str);
6850 break;
6851
6852 case ldstexcl:
6853 /* It is unpredictable if the destination and status registers are the
6854 same. */
6855 if ((aarch64_get_operand_class (opnds[0].type)
6856 == AARCH64_OPND_CLASS_INT_REG)
6857 && (aarch64_get_operand_class (opnds[1].type)
6858 == AARCH64_OPND_CLASS_INT_REG)
6859 && (opnds[0].reg.regno == opnds[1].reg.regno
6860 || opnds[0].reg.regno == opnds[2].reg.regno))
6861 as_warn (_("unpredictable: identical transfer and status registers"
6862 " --`%s'"),
6863 str);
6864
6865 break;
6866
6867 default:
6868 break;
6869 }
6870 }
6871
6872 static void
6873 force_automatic_sequence_close (void)
6874 {
6875 if (now_instr_sequence.instr)
6876 {
6877 as_warn (_("previous `%s' sequence has not been closed"),
6878 now_instr_sequence.instr->opcode->name);
6879 init_insn_sequence (NULL, &now_instr_sequence);
6880 }
6881 }
6882
6883 /* A wrapper function to interface with libopcodes on encoding and
6884 record the error message if there is any.
6885
6886 Return TRUE on success; otherwise return FALSE. */
6887
6888 static bfd_boolean
6889 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6890 aarch64_insn *code)
6891 {
6892 aarch64_operand_error error_info;
6893 memset (&error_info, '\0', sizeof (error_info));
6894 error_info.kind = AARCH64_OPDE_NIL;
6895 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
6896 && !error_info.non_fatal)
6897 return TRUE;
6898
6899 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6900 record_operand_error_info (opcode, &error_info);
6901 return error_info.non_fatal;
6902 }
6903
#ifdef DEBUG_AARCH64
/* Debug helper: print each operand of OPCODE, preferring its name and
   falling back to its description when the name is empty.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  int i;

  for (i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *what = aarch64_get_operand_name (opcode->operands[i]);

      if (what[0] == '\0')
	what = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, what);
    }
}
#endif /* DEBUG_AARCH64 */
6919
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Find the chain of opcode templates for this mnemonic; on success P
     is advanced past the mnemonic to the operand text.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed from the mnemonic (e.g. b.eq) across
     the instruction-state reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode against this template; on failure, fall
	 through and try the next template for the mnemonic.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, TRUE);
	  return;
	}

      template = template->next;
      if (template != NULL)
	{
	  /* Start the next template from a clean state, again preserving
	     the parsed condition.  */
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, FALSE);
}
7059
7060 /* Various frobbings of labels and their addresses. */
7061
void
aarch64_start_line_hook (void)
{
  /* A new statement begins; forget any label seen on the previous line.  */
  last_label_seen = NULL;
}
7067
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember the most recent label so md_assemble can re-anchor it to the
     frag of the following instruction.  */
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
7075
/* Section-change hook: any still-open instruction sequence must not leak
   across sections.  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
7082
7083 int
7084 aarch64_data_in_code (void)
7085 {
7086 if (!strncmp (input_line_pointer + 1, "data:", 5))
7087 {
7088 *input_line_pointer = '/';
7089 input_line_pointer += 5;
7090 *input_line_pointer = 0;
7091 return 1;
7092 }
7093
7094 return 0;
7095 }
7096
/* Strip a trailing "/data" tag (added for data-in-code symbols, see
   aarch64_data_in_code) from NAME in place and return NAME.

   Names of five characters or fewer — including "/data" itself — are
   left untouched, matching the original strict `len > 5' test.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  /* Use size_t for the length: strlen returns size_t, and the old `int'
     local silently truncated it.  */
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
7107 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* NOTE(review): the trailing TRUE/FALSE appears to mark primary names
   (REGDEF) versus convenience aliases such as ip0/fp/lr (REGDEF_ALIAS);
   confirm against the reg_entry struct definition.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
/* Register number N spelled with prefix P, e.g. REGNUM(x,3,R_64) -> "x3".  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Registers 0..15, e.g. the sixteen SVE predicate registers.  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* Registers 0..30; the integer files use this because number 31 has the
   special SP/ZR spellings added separately below.  */
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
/* Full 0..31 set, used for the FP/SIMD/SVE vector register files.  */
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),

  /* Register number 31: stack-pointer and zero-register spellings, which
     get distinct register types.  */
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7177
/* Single-letter flag-value macros: an uppercase letter means the
   corresponding condition flag is set (1), lowercase means clear (0).
   They exist only so each table entry's value below can be spelled the
   same way as its name.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the four flag values into a 4-bit immediate:
   N in bit 3, Z in bit 2, C in bit 1, V in bit 0.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All sixteen spellings of an NZCV flag-set operand with their encodings.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7215 \f
7216 /* MD interface: bits in the object file. */
7217
7218 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7219 for use in the a.out file, and stores them in the array pointed to by buf.
7220 This knows about the endian-ness of the target machine and does
7221 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7222 2 (short) and 4 (long) Floating numbers are put out as a series of
7223 LITTLENUMS (shorts, here at least). */
7224
7225 void
7226 md_number_to_chars (char *buf, valueT val, int n)
7227 {
7228 if (target_big_endian)
7229 number_to_chars_bigendian (buf, val, n);
7230 else
7231 number_to_chars_littleendian (buf, val, n);
7232 }
7233
7234 /* MD interface: Sections. */
7235
/* Estimate the size of a frag before relaxing.  Assume everything fits in
   4 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Every AArch64 instruction is 4 bytes, so no growth needs predicting.  */
  fragp->fr_var = 4;
  return 4;
}
7245
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No target-specific rounding is applied; the size is kept as-is.  */
  return size;
}
7253
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed to reach the next frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach 4-byte instruction alignment; zero-filled and
     (on ELF) marked as data via a mapping symbol.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating part of the frag is one NOP instruction.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
7311
7312 /* Perform target specific initialisation of a frag.
7313 Note - despite the name this initialisation is not done when the frag
7314 is created, but only when its type is assigned. A frag can be created
7315 and used a long time before its type is set, so beware of assuming that
7316 this initialisation is performed first. */
7317
7318 #ifndef OBJ_ELF
/* Non-ELF targets: mapping symbols are an ELF concept, so there is
   nothing to record here.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}
7324
7325 #else /* OBJ_ELF is defined. */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state ($x/$d) appropriate for the frag type.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
7357 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* On entry the CFA is SP with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
7365 #endif /* OBJ_ELF */
7366
7367 /* Convert REGNAME to a DWARF-2 register number. */
7368
7369 int
7370 tc_aarch64_regname_to_dw2regnum (char *regname)
7371 {
7372 const reg_entry *reg = parse_reg (&regname);
7373 if (reg == NULL)
7374 return -1;
7375
7376 switch (reg->type)
7377 {
7378 case REG_TYPE_SP_32:
7379 case REG_TYPE_SP_64:
7380 case REG_TYPE_R_32:
7381 case REG_TYPE_R_64:
7382 return reg->number;
7383
7384 case REG_TYPE_FP_B:
7385 case REG_TYPE_FP_H:
7386 case REG_TYPE_FP_S:
7387 case REG_TYPE_FP_D:
7388 case REG_TYPE_FP_Q:
7389 return reg->number + 64;
7390
7391 default:
7392 break;
7393 }
7394 return -1;
7395 }
7396
/* Implement DWARF2_ADDR_SIZE.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 ABI uses 32-bit addresses even though the ISA is 64-bit.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7408
7409 /* MD interface: Symbol and relocation handling. */
7410
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For AArch64 PC-relative fixups applied to instructions
   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Address of the fixup within the output.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
7431
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap first-two-character check before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  /* Create the GOT symbol lazily on first reference.  */
	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
7457
7458 /* Return non-zero if the indicated VALUE has overflowed the maximum
7459 range expressible by a unsigned number with the indicated number of
7460 BITS. */
7461
7462 static bfd_boolean
7463 unsigned_overflow (valueT value, unsigned bits)
7464 {
7465 valueT lim;
7466 if (bits >= sizeof (valueT) * 8)
7467 return FALSE;
7468 lim = (valueT) 1 << bits;
7469 return (value >= lim);
7470 }
7471
7472
7473 /* Return non-zero if the indicated VALUE has overflowed the maximum
7474 range expressible by an signed number with the indicated number of
7475 BITS. */
7476
7477 static bfd_boolean
7478 signed_overflow (offsetT value, unsigned bits)
7479 {
7480 offsetT lim;
7481 if (bits >= sizeof (offsetT) * 8)
7482 return FALSE;
7483 lim = (offsetT) 1 << (bits - 1);
7484 return (value < -lim || value >= lim);
7485 }
7486
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map the scaled-offset opcode onto its unscaled (LDUR/STUR-style)
     counterpart; OP_NIL means there is none.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS: new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode against the replacement opcode.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return FALSE;

  return TRUE;
}
7549
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN), second in the MOVZ/MOVN/ORR
	 preference order described above.  (The old comment here said
	 "MOVK", which does not match that contract.)  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
7610
7611 /* An instruction operand which is immediate related may have symbol used
7612 in the assembly, e.g.
7613
7614 mov w0, u32
7615 .set u32, 0x00ffff00
7616
7617 At the time when the assembly instruction is parsed, a referenced symbol,
7618 like 'u32' in the above example may not have been seen; a fixS is created
7619 in such a case and is handled here after symbols have been resolved.
7620 Instruction is fixed up with VALUE using the information in *FIXP plus
7621 extra information in FLAGS.
7622
7623 This function is called by md_apply_fix to fix up instructions that need
7624 a fix-up described above but does not involve any linker-time relocation. */
7625
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Address of the instruction bytes inside the frag being fixed up.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  /* Dispatch on the kind of operand the deferred symbol resolved into;
     each case re-reads the encoded instruction, range-checks VALUE and
     inserts it into the appropriate bit-field.  */
  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* 16-bit immediate of SVC/HVC/SMC and friends.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		   3  322|2222|2 2  2 21111 111111
		   1  098|7654|3 2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
	 ->
		   3  322|2222|2 2  221111111111
		   1  098|7654|3 2  109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12         Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12         Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12         Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12         Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Let the generic encoder validate and encode
	 the (possibly non-trivial) bitmask encoding.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to load a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* A scaled-offset load/store whose offset does not fit may still
	     be representable as the unscaled (LDUR/STUR-style) form.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7788
7789 /* Apply a fixup (fixP) to segment data, once it has been determined
7790 by our caller that we have all the info we need to fix it up.
7791
7792 Parameter valP is the pointer to the value of the bits. */
7793
7794 void
7795 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7796 {
7797 offsetT value = *valP;
7798 uint32_t insn;
7799 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7800 int scale;
7801 unsigned flags = fixP->fx_addnumber;
7802
7803 DEBUG_TRACE ("\n\n");
7804 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7805 DEBUG_TRACE ("Enter md_apply_fix");
7806
7807 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7808
7809 /* Note whether this will delete the relocation. */
7810
7811 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7812 fixP->fx_done = 1;
7813
7814 /* Process the relocations. */
7815 switch (fixP->fx_r_type)
7816 {
7817 case BFD_RELOC_NONE:
7818 /* This will need to go in the object file. */
7819 fixP->fx_done = 0;
7820 break;
7821
7822 case BFD_RELOC_8:
7823 case BFD_RELOC_8_PCREL:
7824 if (fixP->fx_done || !seg->use_rela_p)
7825 md_number_to_chars (buf, value, 1);
7826 break;
7827
7828 case BFD_RELOC_16:
7829 case BFD_RELOC_16_PCREL:
7830 if (fixP->fx_done || !seg->use_rela_p)
7831 md_number_to_chars (buf, value, 2);
7832 break;
7833
7834 case BFD_RELOC_32:
7835 case BFD_RELOC_32_PCREL:
7836 if (fixP->fx_done || !seg->use_rela_p)
7837 md_number_to_chars (buf, value, 4);
7838 break;
7839
7840 case BFD_RELOC_64:
7841 case BFD_RELOC_64_PCREL:
7842 if (fixP->fx_done || !seg->use_rela_p)
7843 md_number_to_chars (buf, value, 8);
7844 break;
7845
7846 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7847 /* We claim that these fixups have been processed here, even if
7848 in fact we generate an error because we do not have a reloc
7849 for them, so tc_gen_reloc() will reject them. */
7850 fixP->fx_done = 1;
7851 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7852 {
7853 as_bad_where (fixP->fx_file, fixP->fx_line,
7854 _("undefined symbol %s used as an immediate value"),
7855 S_GET_NAME (fixP->fx_addsy));
7856 goto apply_fix_return;
7857 }
7858 fix_insn (fixP, flags, value);
7859 break;
7860
7861 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7862 if (fixP->fx_done || !seg->use_rela_p)
7863 {
7864 if (value & 3)
7865 as_bad_where (fixP->fx_file, fixP->fx_line,
7866 _("pc-relative load offset not word aligned"));
7867 if (signed_overflow (value, 21))
7868 as_bad_where (fixP->fx_file, fixP->fx_line,
7869 _("pc-relative load offset out of range"));
7870 insn = get_aarch64_insn (buf);
7871 insn |= encode_ld_lit_ofs_19 (value >> 2);
7872 put_aarch64_insn (buf, insn);
7873 }
7874 break;
7875
7876 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7877 if (fixP->fx_done || !seg->use_rela_p)
7878 {
7879 if (signed_overflow (value, 21))
7880 as_bad_where (fixP->fx_file, fixP->fx_line,
7881 _("pc-relative address offset out of range"));
7882 insn = get_aarch64_insn (buf);
7883 insn |= encode_adr_imm (value);
7884 put_aarch64_insn (buf, insn);
7885 }
7886 break;
7887
7888 case BFD_RELOC_AARCH64_BRANCH19:
7889 if (fixP->fx_done || !seg->use_rela_p)
7890 {
7891 if (value & 3)
7892 as_bad_where (fixP->fx_file, fixP->fx_line,
7893 _("conditional branch target not word aligned"));
7894 if (signed_overflow (value, 21))
7895 as_bad_where (fixP->fx_file, fixP->fx_line,
7896 _("conditional branch out of range"));
7897 insn = get_aarch64_insn (buf);
7898 insn |= encode_cond_branch_ofs_19 (value >> 2);
7899 put_aarch64_insn (buf, insn);
7900 }
7901 break;
7902
7903 case BFD_RELOC_AARCH64_TSTBR14:
7904 if (fixP->fx_done || !seg->use_rela_p)
7905 {
7906 if (value & 3)
7907 as_bad_where (fixP->fx_file, fixP->fx_line,
7908 _("conditional branch target not word aligned"));
7909 if (signed_overflow (value, 16))
7910 as_bad_where (fixP->fx_file, fixP->fx_line,
7911 _("conditional branch out of range"));
7912 insn = get_aarch64_insn (buf);
7913 insn |= encode_tst_branch_ofs_14 (value >> 2);
7914 put_aarch64_insn (buf, insn);
7915 }
7916 break;
7917
7918 case BFD_RELOC_AARCH64_CALL26:
7919 case BFD_RELOC_AARCH64_JUMP26:
7920 if (fixP->fx_done || !seg->use_rela_p)
7921 {
7922 if (value & 3)
7923 as_bad_where (fixP->fx_file, fixP->fx_line,
7924 _("branch target not word aligned"));
7925 if (signed_overflow (value, 28))
7926 as_bad_where (fixP->fx_file, fixP->fx_line,
7927 _("branch out of range"));
7928 insn = get_aarch64_insn (buf);
7929 insn |= encode_branch_ofs_26 (value >> 2);
7930 put_aarch64_insn (buf, insn);
7931 }
7932 break;
7933
7934 case BFD_RELOC_AARCH64_MOVW_G0:
7935 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7936 case BFD_RELOC_AARCH64_MOVW_G0_S:
7937 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7938 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
7939 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
7940 scale = 0;
7941 goto movw_common;
7942 case BFD_RELOC_AARCH64_MOVW_G1:
7943 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7944 case BFD_RELOC_AARCH64_MOVW_G1_S:
7945 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7946 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
7947 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
7948 scale = 16;
7949 goto movw_common;
7950 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7951 scale = 0;
7952 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7953 /* Should always be exported to object file, see
7954 aarch64_force_relocation(). */
7955 gas_assert (!fixP->fx_done);
7956 gas_assert (seg->use_rela_p);
7957 goto movw_common;
7958 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7959 scale = 16;
7960 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7961 /* Should always be exported to object file, see
7962 aarch64_force_relocation(). */
7963 gas_assert (!fixP->fx_done);
7964 gas_assert (seg->use_rela_p);
7965 goto movw_common;
7966 case BFD_RELOC_AARCH64_MOVW_G2:
7967 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7968 case BFD_RELOC_AARCH64_MOVW_G2_S:
7969 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
7970 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
7971 scale = 32;
7972 goto movw_common;
7973 case BFD_RELOC_AARCH64_MOVW_G3:
7974 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
7975 scale = 48;
7976 movw_common:
7977 if (fixP->fx_done || !seg->use_rela_p)
7978 {
7979 insn = get_aarch64_insn (buf);
7980
7981 if (!fixP->fx_done)
7982 {
7983 /* REL signed addend must fit in 16 bits */
7984 if (signed_overflow (value, 16))
7985 as_bad_where (fixP->fx_file, fixP->fx_line,
7986 _("offset out of range"));
7987 }
7988 else
7989 {
7990 /* Check for overflow and scale. */
7991 switch (fixP->fx_r_type)
7992 {
7993 case BFD_RELOC_AARCH64_MOVW_G0:
7994 case BFD_RELOC_AARCH64_MOVW_G1:
7995 case BFD_RELOC_AARCH64_MOVW_G2:
7996 case BFD_RELOC_AARCH64_MOVW_G3:
7997 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7998 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7999 if (unsigned_overflow (value, scale + 16))
8000 as_bad_where (fixP->fx_file, fixP->fx_line,
8001 _("unsigned value out of range"));
8002 break;
8003 case BFD_RELOC_AARCH64_MOVW_G0_S:
8004 case BFD_RELOC_AARCH64_MOVW_G1_S:
8005 case BFD_RELOC_AARCH64_MOVW_G2_S:
8006 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8007 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8008 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8009 /* NOTE: We can only come here with movz or movn. */
8010 if (signed_overflow (value, scale + 16))
8011 as_bad_where (fixP->fx_file, fixP->fx_line,
8012 _("signed value out of range"));
8013 if (value < 0)
8014 {
8015 /* Force use of MOVN. */
8016 value = ~value;
8017 insn = reencode_movzn_to_movn (insn);
8018 }
8019 else
8020 {
8021 /* Force use of MOVZ. */
8022 insn = reencode_movzn_to_movz (insn);
8023 }
8024 break;
8025 default:
8026 /* Unchecked relocations. */
8027 break;
8028 }
8029 value >>= scale;
8030 }
8031
8032 /* Insert value into MOVN/MOVZ/MOVK instruction. */
8033 insn |= encode_movw_imm (value & 0xffff);
8034
8035 put_aarch64_insn (buf, insn);
8036 }
8037 break;
8038
8039 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8040 fixP->fx_r_type = (ilp32_p
8041 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
8042 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
8043 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8044 /* Should always be exported to object file, see
8045 aarch64_force_relocation(). */
8046 gas_assert (!fixP->fx_done);
8047 gas_assert (seg->use_rela_p);
8048 break;
8049
8050 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8051 fixP->fx_r_type = (ilp32_p
8052 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
8053 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
8054 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8055 /* Should always be exported to object file, see
8056 aarch64_force_relocation(). */
8057 gas_assert (!fixP->fx_done);
8058 gas_assert (seg->use_rela_p);
8059 break;
8060
8061 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8062 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8063 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8064 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8065 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8066 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8067 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8068 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8069 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8070 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8071 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8072 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8073 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8074 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8075 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8076 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8077 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8078 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8079 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8080 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8081 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8082 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8083 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8084 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8085 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8086 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8087 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8088 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8089 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8090 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8091 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8092 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8093 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8094 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8095 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8096 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8097 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8098 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8099 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8100 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8101 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8102 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8103 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8104 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8105 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8106 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8107 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8108 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8109 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8110 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8111 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8112 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8113 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8114 /* Should always be exported to object file, see
8115 aarch64_force_relocation(). */
8116 gas_assert (!fixP->fx_done);
8117 gas_assert (seg->use_rela_p);
8118 break;
8119
8120 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8121 /* Should always be exported to object file, see
8122 aarch64_force_relocation(). */
8123 fixP->fx_r_type = (ilp32_p
8124 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
8125 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
8126 gas_assert (!fixP->fx_done);
8127 gas_assert (seg->use_rela_p);
8128 break;
8129
8130 case BFD_RELOC_AARCH64_ADD_LO12:
8131 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8132 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8133 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8134 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8135 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8136 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8137 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8138 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8139 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8140 case BFD_RELOC_AARCH64_LDST128_LO12:
8141 case BFD_RELOC_AARCH64_LDST16_LO12:
8142 case BFD_RELOC_AARCH64_LDST32_LO12:
8143 case BFD_RELOC_AARCH64_LDST64_LO12:
8144 case BFD_RELOC_AARCH64_LDST8_LO12:
8145 /* Should always be exported to object file, see
8146 aarch64_force_relocation(). */
8147 gas_assert (!fixP->fx_done);
8148 gas_assert (seg->use_rela_p);
8149 break;
8150
8151 case BFD_RELOC_AARCH64_TLSDESC_ADD:
8152 case BFD_RELOC_AARCH64_TLSDESC_CALL:
8153 case BFD_RELOC_AARCH64_TLSDESC_LDR:
8154 break;
8155
8156 case BFD_RELOC_UNUSED:
8157 /* An error will already have been reported. */
8158 break;
8159
8160 default:
8161 as_bad_where (fixP->fx_file, fixP->fx_line,
8162 _("unexpected %s fixup"),
8163 bfd_get_reloc_code_name (fixP->fx_r_type));
8164 break;
8165 }
8166
8167 apply_fix_return:
8168 /* Free the allocated the struct aarch64_inst.
8169 N.B. currently there are very limited number of fix-up types actually use
8170 this field, so the impact on the performance should be minimal . */
8171 if (fixP->tc_fix_data.inst != NULL)
8172 free (fixP->tc_fix_data.inst);
8173
8174 return;
8175 }
8176
8177 /* Translate internal representation of relocation info to BFD target
8178 format. */
8179
8180 arelent *
8181 tc_gen_reloc (asection * section, fixS * fixp)
8182 {
8183 arelent *reloc;
8184 bfd_reloc_code_real_type code;
8185
8186 reloc = XNEW (arelent);
8187
8188 reloc->sym_ptr_ptr = XNEW (asymbol *);
8189 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8190 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8191
8192 if (fixp->fx_pcrel)
8193 {
8194 if (section->use_rela_p)
8195 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8196 else
8197 fixp->fx_offset = reloc->address;
8198 }
8199 reloc->addend = fixp->fx_offset;
8200
8201 code = fixp->fx_r_type;
8202 switch (code)
8203 {
8204 case BFD_RELOC_16:
8205 if (fixp->fx_pcrel)
8206 code = BFD_RELOC_16_PCREL;
8207 break;
8208
8209 case BFD_RELOC_32:
8210 if (fixp->fx_pcrel)
8211 code = BFD_RELOC_32_PCREL;
8212 break;
8213
8214 case BFD_RELOC_64:
8215 if (fixp->fx_pcrel)
8216 code = BFD_RELOC_64_PCREL;
8217 break;
8218
8219 default:
8220 break;
8221 }
8222
8223 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8224 if (reloc->howto == NULL)
8225 {
8226 as_bad_where (fixp->fx_file, fixp->fx_line,
8227 _
8228 ("cannot represent %s relocation in this object file format"),
8229 bfd_get_reloc_code_name (code));
8230 return NULL;
8231 }
8232
8233 return reloc;
8234 }
8235
8236 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8237
8238 void
8239 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8240 {
8241 bfd_reloc_code_real_type type;
8242 int pcrel = 0;
8243
8244 /* Pick a reloc.
8245 FIXME: @@ Should look at CPU word size. */
8246 switch (size)
8247 {
8248 case 1:
8249 type = BFD_RELOC_8;
8250 break;
8251 case 2:
8252 type = BFD_RELOC_16;
8253 break;
8254 case 4:
8255 type = BFD_RELOC_32;
8256 break;
8257 case 8:
8258 type = BFD_RELOC_64;
8259 break;
8260 default:
8261 as_bad (_("cannot do %u-byte relocation"), size);
8262 type = BFD_RELOC_UNUSED;
8263 break;
8264 }
8265
8266 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8267 }
8268
/* Decide whether a fixup must be kept as a relocation for the linker
   (return non-zero) or may be resolved by the assembler (return zero).  */
int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  /* Anything else follows the generic policy (e.g. weak/extern symbols).  */
  return generic_force_reloc (fixp);
}
8364
8365 #ifdef OBJ_ELF
8366
8367 /* Implement md_after_parse_args. This is the earliest time we need to decide
8368 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8369
8370 void
8371 aarch64_after_parse_args (void)
8372 {
8373 if (aarch64_abi != AARCH64_ABI_NONE)
8374 return;
8375
8376 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8377 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8378 aarch64_abi = AARCH64_ABI_ILP32;
8379 else
8380 aarch64_abi = AARCH64_ABI_LP64;
8381 }
8382
8383 const char *
8384 elf64_aarch64_target_format (void)
8385 {
8386 #ifdef TE_CLOUDABI
8387 /* FIXME: What to do for ilp32_p ? */
8388 if (target_big_endian)
8389 return "elf64-bigaarch64-cloudabi";
8390 else
8391 return "elf64-littleaarch64-cloudabi";
8392 #else
8393 if (target_big_endian)
8394 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8395 else
8396 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8397 #endif
8398 }
8399
/* Hook run for each symbol at write-out time; no AArch64-specific
   adjustments are needed, so defer entirely to the generic ELF code.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8405 #endif
8406
8407 /* MD interface: Finalization. */
8408
8409 /* A good place to do this, although this was probably not intended
8410 for this kind of use. We need to dump the literal pool before
8411 references are made to a null symbol pointer. */
8412
8413 void
8414 aarch64_cleanup (void)
8415 {
8416 literal_pool *pool;
8417
8418 for (pool = list_of_pools; pool; pool = pool->next)
8419 {
8420 /* Put it at the end of the relevant section. */
8421 subseg_set (pool->section, pool->sub_section);
8422 s_ltorg (0);
8423 }
8424 }
8425
8426 #ifdef OBJ_ELF
8427 /* Remove any excess mapping symbols generated for alignment frags in
8428 SEC. We may have created a mapping symbol before a zero byte
8429 alignment; remove it if there's a mapping symbol after the
8430 alignment. */
/* Remove any excess mapping symbols generated for alignment frags in SEC:
   a mapping symbol placed at the very end of a frag is redundant when the
   following frag starts with its own mapping symbol (or when it is at the
   end of the section).  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded for this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary; scan forward over empty frags
	 to decide whether it is shadowed by a later mapping symbol.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8490 #endif
8491
8492 /* Adjust the symbol table. */
8493
/* Final symbol-table pass before write-out.  */
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8504
/* Insert KEY/VALUE into TABLE, reporting (but not aborting on) any
   failure from the hash library.  */
static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *err = hash_insert (table, key, value);

  if (err != NULL)
    printf ("Internal Error: Can't hash %s\n", key);
}
8514
8515 static void
8516 fill_instruction_hash_table (void)
8517 {
8518 aarch64_opcode *opcode = aarch64_opcode_table;
8519
8520 while (opcode->name != NULL)
8521 {
8522 templates *templ, *new_templ;
8523 templ = hash_find (aarch64_ops_hsh, opcode->name);
8524
8525 new_templ = XNEW (templates);
8526 new_templ->opcode = opcode;
8527 new_templ->next = NULL;
8528
8529 if (!templ)
8530 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8531 else
8532 {
8533 new_templ->next = templ->next;
8534 templ->next = new_templ;
8535 }
8536 ++opcode;
8537 }
8538 }
8539
/* Copy at most NUM characters of SRC into DST, upper-casing each one, and
   always NUL-terminate DST (which must have room for NUM + 1 bytes).  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  /* Use size_t for the index so the comparison with NUM is performed in
     the same type; the previous 'unsigned int' counter could not cover a
     size_t bound exceeding UINT_MAX on LP64 hosts.  */
  size_t i;
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
8548
8549 /* Assume STR point to a lower-case string, allocate, convert and return
8550 the corresponding upper-case string. */
8551 static inline const char*
8552 get_upper_str (const char *str)
8553 {
8554 char *ret;
8555 size_t len = strlen (str);
8556 ret = XNEWVEC (char, len + 1);
8557 convert_to_upper (ret, str, len);
8558 return ret;
8559 }
8560
8561 /* MD interface: Initialization. */
8562
8563 void
8564 md_begin (void)
8565 {
8566 unsigned mach;
8567 unsigned int i;
8568
8569 if ((aarch64_ops_hsh = hash_new ()) == NULL
8570 || (aarch64_cond_hsh = hash_new ()) == NULL
8571 || (aarch64_shift_hsh = hash_new ()) == NULL
8572 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8573 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8574 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8575 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8576 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8577 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8578 || (aarch64_sys_regs_sr_hsh = hash_new ()) == NULL
8579 || (aarch64_reg_hsh = hash_new ()) == NULL
8580 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8581 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8582 || (aarch64_pldop_hsh = hash_new ()) == NULL
8583 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8584 as_fatal (_("virtual memory exhausted"));
8585
8586 fill_instruction_hash_table ();
8587
8588 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8589 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8590 (void *) (aarch64_sys_regs + i));
8591
8592 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8593 checked_hash_insert (aarch64_pstatefield_hsh,
8594 aarch64_pstatefields[i].name,
8595 (void *) (aarch64_pstatefields + i));
8596
8597 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8598 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8599 aarch64_sys_regs_ic[i].name,
8600 (void *) (aarch64_sys_regs_ic + i));
8601
8602 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8603 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8604 aarch64_sys_regs_dc[i].name,
8605 (void *) (aarch64_sys_regs_dc + i));
8606
8607 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8608 checked_hash_insert (aarch64_sys_regs_at_hsh,
8609 aarch64_sys_regs_at[i].name,
8610 (void *) (aarch64_sys_regs_at + i));
8611
8612 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8613 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8614 aarch64_sys_regs_tlbi[i].name,
8615 (void *) (aarch64_sys_regs_tlbi + i));
8616
8617 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
8618 checked_hash_insert (aarch64_sys_regs_sr_hsh,
8619 aarch64_sys_regs_sr[i].name,
8620 (void *) (aarch64_sys_regs_sr + i));
8621
8622 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8623 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8624 (void *) (reg_names + i));
8625
8626 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8627 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8628 (void *) (nzcv_names + i));
8629
8630 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8631 {
8632 const char *name = aarch64_operand_modifiers[i].name;
8633 checked_hash_insert (aarch64_shift_hsh, name,
8634 (void *) (aarch64_operand_modifiers + i));
8635 /* Also hash the name in the upper case. */
8636 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8637 (void *) (aarch64_operand_modifiers + i));
8638 }
8639
8640 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8641 {
8642 unsigned int j;
8643 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8644 the same condition code. */
8645 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8646 {
8647 const char *name = aarch64_conds[i].names[j];
8648 if (name == NULL)
8649 break;
8650 checked_hash_insert (aarch64_cond_hsh, name,
8651 (void *) (aarch64_conds + i));
8652 /* Also hash the name in the upper case. */
8653 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8654 (void *) (aarch64_conds + i));
8655 }
8656 }
8657
8658 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8659 {
8660 const char *name = aarch64_barrier_options[i].name;
8661 /* Skip xx00 - the unallocated values of option. */
8662 if ((i & 0x3) == 0)
8663 continue;
8664 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8665 (void *) (aarch64_barrier_options + i));
8666 /* Also hash the name in the upper case. */
8667 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8668 (void *) (aarch64_barrier_options + i));
8669 }
8670
8671 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8672 {
8673 const char* name = aarch64_prfops[i].name;
8674 /* Skip the unallocated hint encodings. */
8675 if (name == NULL)
8676 continue;
8677 checked_hash_insert (aarch64_pldop_hsh, name,
8678 (void *) (aarch64_prfops + i));
8679 /* Also hash the name in the upper case. */
8680 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8681 (void *) (aarch64_prfops + i));
8682 }
8683
8684 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8685 {
8686 const char* name = aarch64_hint_options[i].name;
8687
8688 checked_hash_insert (aarch64_hint_opt_hsh, name,
8689 (void *) (aarch64_hint_options + i));
8690 /* Also hash the name in the upper case. */
8691 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8692 (void *) (aarch64_hint_options + i));
8693 }
8694
8695 /* Set the cpu variant based on the command-line options. */
8696 if (!mcpu_cpu_opt)
8697 mcpu_cpu_opt = march_cpu_opt;
8698
8699 if (!mcpu_cpu_opt)
8700 mcpu_cpu_opt = &cpu_default;
8701
8702 cpu_variant = *mcpu_cpu_opt;
8703
8704 /* Record the CPU type. */
8705 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8706
8707 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8708 }
8709
/* Command line processing.  */

/* Target-specific short options; the "-m..." selections are decoded via
   the long-option tables below.  */
const char *md_shortopts = "m:";

/* -EB/-EL endianness selectors.  Only the option(s) valid for the
   configured target are defined.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8736
/* Description of a simple boolean "-m..." option: matching it stores
   VALUE into *VAR.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Table of the simple on/off options, scanned by md_parse_option.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8759
/* An entry in the -mcpu= table: the accepted name, the feature set it
   implies, and an optional display name.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Entry order matters for prefix matching in
   aarch64_parse_cpu; the "all" entry is skipped by the .cpu directive.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
8838
/* An entry in the -march= table: architecture name and its base
   feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  The "all" entry is skipped by the .arch
   directive.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {NULL, AARCH64_ARCH_NONE}
};
8857
/* ISA extensions.  */

/* A "+ext"/"+noext" modifier: the extension name, the features it adds
   or removes, and the features it depends on (used to compute the
   transitive enable/disable closures below).  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* Table of all architectural extension modifiers accepted after
   -mcpu=/-march= and by the .arch_extension directive.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO
					 | AARCH64_FEATURE_AES
					 | AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2
					 | AARCH64_FEATURE_SHA3, 0),
			AARCH64_ARCH_NONE},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"bitperm",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
8941
/* A "-mxxx=" option that takes a sub-argument; OPTION is matched as a
   prefix of the command-line argument and FUNC decodes the rest.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
8949
8950 /* Transitive closure of features depending on set. */
8951 static aarch64_feature_set
8952 aarch64_feature_disable_set (aarch64_feature_set set)
8953 {
8954 const struct aarch64_option_cpu_value_table *opt;
8955 aarch64_feature_set prev = 0;
8956
8957 while (prev != set) {
8958 prev = set;
8959 for (opt = aarch64_features; opt->name != NULL; opt++)
8960 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8961 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8962 }
8963 return set;
8964 }
8965
8966 /* Transitive closure of dependencies of set. */
8967 static aarch64_feature_set
8968 aarch64_feature_enable_set (aarch64_feature_set set)
8969 {
8970 const struct aarch64_option_cpu_value_table *opt;
8971 aarch64_feature_set prev = 0;
8972
8973 while (prev != set) {
8974 prev = set;
8975 for (opt = aarch64_features; opt->name != NULL; opt++)
8976 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8977 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8978 }
8979 return set;
8980 }
8981
8982 static int
8983 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8984 bfd_boolean ext_only)
8985 {
8986 /* We insist on extensions being added before being removed. We achieve
8987 this by using the ADDING_VALUE variable to indicate whether we are
8988 adding an extension (1) or removing it (0) and only allowing it to
8989 change in the order -1 -> 1 -> 0. */
8990 int adding_value = -1;
8991 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8992
8993 /* Copy the feature set, so that we can modify it. */
8994 *ext_set = **opt_p;
8995 *opt_p = ext_set;
8996
8997 while (str != NULL && *str != 0)
8998 {
8999 const struct aarch64_option_cpu_value_table *opt;
9000 const char *ext = NULL;
9001 int optlen;
9002
9003 if (!ext_only)
9004 {
9005 if (*str != '+')
9006 {
9007 as_bad (_("invalid architectural extension"));
9008 return 0;
9009 }
9010
9011 ext = strchr (++str, '+');
9012 }
9013
9014 if (ext != NULL)
9015 optlen = ext - str;
9016 else
9017 optlen = strlen (str);
9018
9019 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
9020 {
9021 if (adding_value != 0)
9022 adding_value = 0;
9023 optlen -= 2;
9024 str += 2;
9025 }
9026 else if (optlen > 0)
9027 {
9028 if (adding_value == -1)
9029 adding_value = 1;
9030 else if (adding_value != 1)
9031 {
9032 as_bad (_("must specify extensions to add before specifying "
9033 "those to remove"));
9034 return FALSE;
9035 }
9036 }
9037
9038 if (optlen == 0)
9039 {
9040 as_bad (_("missing architectural extension"));
9041 return 0;
9042 }
9043
9044 gas_assert (adding_value != -1);
9045
9046 for (opt = aarch64_features; opt->name != NULL; opt++)
9047 if (strncmp (opt->name, str, optlen) == 0)
9048 {
9049 aarch64_feature_set set;
9050
9051 /* Add or remove the extension. */
9052 if (adding_value)
9053 {
9054 set = aarch64_feature_enable_set (opt->value);
9055 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9056 }
9057 else
9058 {
9059 set = aarch64_feature_disable_set (opt->value);
9060 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9061 }
9062 break;
9063 }
9064
9065 if (opt->name == NULL)
9066 {
9067 as_bad (_("unknown architectural extension `%s'"), str);
9068 return 0;
9069 }
9070
9071 str = ext;
9072 };
9073
9074 return 1;
9075 }
9076
9077 static int
9078 aarch64_parse_cpu (const char *str)
9079 {
9080 const struct aarch64_cpu_option_table *opt;
9081 const char *ext = strchr (str, '+');
9082 size_t optlen;
9083
9084 if (ext != NULL)
9085 optlen = ext - str;
9086 else
9087 optlen = strlen (str);
9088
9089 if (optlen == 0)
9090 {
9091 as_bad (_("missing cpu name `%s'"), str);
9092 return 0;
9093 }
9094
9095 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9096 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9097 {
9098 mcpu_cpu_opt = &opt->value;
9099 if (ext != NULL)
9100 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
9101
9102 return 1;
9103 }
9104
9105 as_bad (_("unknown cpu `%s'"), str);
9106 return 0;
9107 }
9108
9109 static int
9110 aarch64_parse_arch (const char *str)
9111 {
9112 const struct aarch64_arch_option_table *opt;
9113 const char *ext = strchr (str, '+');
9114 size_t optlen;
9115
9116 if (ext != NULL)
9117 optlen = ext - str;
9118 else
9119 optlen = strlen (str);
9120
9121 if (optlen == 0)
9122 {
9123 as_bad (_("missing architecture name `%s'"), str);
9124 return 0;
9125 }
9126
9127 for (opt = aarch64_archs; opt->name != NULL; opt++)
9128 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9129 {
9130 march_cpu_opt = &opt->value;
9131 if (ext != NULL)
9132 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
9133
9134 return 1;
9135 }
9136
9137 as_bad (_("unknown architecture `%s'\n"), str);
9138 return 0;
9139 }
9140
/* ABIs.  */

/* Mapping from -mabi= argument to the internal ABI enumerator.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32",	AARCH64_ABI_ILP32},
  {"lp64",	AARCH64_ABI_LP64},
};
9152
9153 static int
9154 aarch64_parse_abi (const char *str)
9155 {
9156 unsigned int i;
9157
9158 if (str[0] == '\0')
9159 {
9160 as_bad (_("missing abi name `%s'"), str);
9161 return 0;
9162 }
9163
9164 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9165 if (strcmp (str, aarch64_abis[i].name) == 0)
9166 {
9167 aarch64_abi = aarch64_abis[i].value;
9168 return 1;
9169 }
9170
9171 as_bad (_("unknown abi `%s'\n"), str);
9172 return 0;
9173 }
9174
/* Table of "-mxxx=" options with sub-arguments; OPTION is matched as a
   prefix and FUNC parses the remainder of the argument.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
9186
/* MD hook: handle command-line option C with argument ARG.  Returns 1
   if the option was recognized and consumed, 0 otherwise.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple boolean options; the stored option string
	 is the short-option letter followed by the long name, so match
	 C against the first character and ARG against the rest.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
9254
/* MD hook: print the target-specific options summary to FP, as part of
   the assembler's --help output.  */
void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  /* Simple boolean options.  */
  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  /* Options taking a sub-argument.  */
  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
 -EB assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
 -EL assemble code for a little-endian cpu\n"));
#endif
}
9281
/* Parse a .cpu directive.  */

static void
s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_cpu_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  /* Isolate the argument: temporarily NUL-terminate it in the input
     buffer, remembering the character we overwrite.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* An optional "+ext..." suffix selects architectural extensions.  */
  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry.  */
  for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
	&& strncmp (name, opt->name, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	if (ext != NULL)
	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
	    /* NOTE(review): this early return does not restore
	       saved_char, leaving the NUL in the input buffer —
	       confirm whether that is intentional.  */
	    return;

	cpu_variant = *mcpu_cpu_opt;

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
9326
9327
9328 /* Parse a .arch directive. */
9329
9330 static void
9331 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9332 {
9333 const struct aarch64_arch_option_table *opt;
9334 char saved_char;
9335 char *name;
9336 char *ext;
9337 size_t optlen;
9338
9339 name = input_line_pointer;
9340 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9341 input_line_pointer++;
9342 saved_char = *input_line_pointer;
9343 *input_line_pointer = 0;
9344
9345 ext = strchr (name, '+');
9346
9347 if (ext != NULL)
9348 optlen = ext - name;
9349 else
9350 optlen = strlen (name);
9351
9352 /* Skip the first "all" entry. */
9353 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9354 if (strlen (opt->name) == optlen
9355 && strncmp (name, opt->name, optlen) == 0)
9356 {
9357 mcpu_cpu_opt = &opt->value;
9358 if (ext != NULL)
9359 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9360 return;
9361
9362 cpu_variant = *mcpu_cpu_opt;
9363
9364 *input_line_pointer = saved_char;
9365 demand_empty_rest_of_line ();
9366 return;
9367 }
9368
9369 as_bad (_("unknown architecture `%s'\n"), name);
9370 *input_line_pointer = saved_char;
9371 ignore_rest_of_line ();
9372 }
9373
9374 /* Parse a .arch_extension directive. */
9375
9376 static void
9377 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9378 {
9379 char saved_char;
9380 char *ext = input_line_pointer;;
9381
9382 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9383 input_line_pointer++;
9384 saved_char = *input_line_pointer;
9385 *input_line_pointer = 0;
9386
9387 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9388 return;
9389
9390 cpu_variant = *mcpu_cpu_opt;
9391
9392 *input_line_pointer = saved_char;
9393 demand_empty_rest_of_line ();
9394 }
9395
/* Copy symbol information.  */

/* Propagate the AArch64-specific symbol flag word from SRC to DEST.  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}