/* gitweb viewer header removed: gas/config/tc-aarch64.c from binutils-gdb,
   commit "aarch64: [SME] Add +sme option to -march".  */
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2021 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
/* Bits for DEFINED field in vector_type_el.  */
#define NTA_HASTYPE     1
#define NTA_HASINDEX    2
#define NTA_HASVARWIDTH 4

/* Parsed shape of a vector register or vector element.  */
struct vector_type_el
{
  /* Element type (NT_b/h/s/d/q, or NT_zero/NT_merge for predication).  */
  enum vector_el_type type;
  /* Mask of NTA_* bits saying which of the fields here are meaningful.  */
  unsigned char defined;
  /* Number of elements; 0 when no element count was written
     (variable-width SVE forms).  */
  unsigned width;
  /* Element index, meaningful when NTA_HASINDEX is set in DEFINED.  */
  int64_t index;
};
114
#define FIXUP_F_HAS_EXPLICIT_SHIFT	0x00000001

/* Relocation/fixup information attached to a parsed instruction.  */
struct reloc
{
  /* BFD relocation code to emit.  */
  bfd_reloc_code_real_type type;
  /* The expression the fixup resolves against.  */
  expressionS exp;
  /* Non-zero if the relocation is PC-relative.  */
  int pc_rel;
  /* The operand the relocation applies to.  */
  enum aarch64_opnd opnd;
  /* FIXUP_F_* flags (e.g. FIXUP_F_HAS_EXPLICIT_SHIFT).  */
  uint32_t flags;
  /* Set if libopcodes is needed when resolving the fixup.  */
  unsigned need_libopcodes_p : 1;
};
126
127 struct aarch64_instruction
128 {
129 /* libopcodes structure for instruction intermediate representation. */
130 aarch64_inst base;
131 /* Record assembly errors found during the parsing. */
132 struct
133 {
134 enum aarch64_operand_error_kind kind;
135 const char *error;
136 } parsing_error;
137 /* The condition that appears in the assembly line. */
138 int cond;
139 /* Relocation information (including the GAS internal fixup). */
140 struct reloc reloc;
141 /* Need to generate an immediate in the literal pool. */
142 unsigned gen_lit_pool : 1;
143 };
144
145 typedef struct aarch64_instruction aarch64_instruction;
146
147 static aarch64_instruction inst;
148
149 static bool parse_operands (char *, const aarch64_opcode *);
150 static bool programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
183 static inline bool
184 error_p (void)
185 {
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188
189 static inline const char *
190 get_error_message (void)
191 {
192 return inst.parsing_error.error;
193 }
194
195 static inline enum aarch64_operand_error_kind
196 get_error_kind (void)
197 {
198 return inst.parsing_error.kind;
199 }
200
201 static inline void
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
203 {
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
206 }
207
208 static inline void
209 set_recoverable_error (const char *error)
210 {
211 set_error (AARCH64_OPDE_RECOVERABLE, error);
212 }
213
214 /* Use the DESC field of the corresponding aarch64_operand entry to compose
215 the error message. */
216 static inline void
217 set_default_error (void)
218 {
219 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
220 }
221
222 static inline void
223 set_syntax_error (const char *error)
224 {
225 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
226 }
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
235 static inline void
236 set_fatal_syntax_error (const char *error)
237 {
238 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
239 }
240 \f
241 /* Return value for certain parsers when the parsing fails; those parsers
242 return the information of the parsed result, e.g. register number, on
243 success. */
244 #define PARSE_FAIL -1
245
246 /* This is an invalid condition code that means no conditional field is
247 present. */
248 #define COND_ALWAYS 0x10
249
250 typedef struct
251 {
252 const char *template;
253 uint32_t value;
254 } asm_nzcv;
255
256 struct reloc_entry
257 {
258 char *name;
259 bfd_reloc_code_real_type reloc;
260 };
261
262 /* Macros to define the register types and masks for the purpose
263 of parsing. */
264
265 #undef AARCH64_REG_TYPES
266 #define AARCH64_REG_TYPES \
267 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
268 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
269 BASIC_REG_TYPE(SP_32) /* wsp */ \
270 BASIC_REG_TYPE(SP_64) /* sp */ \
271 BASIC_REG_TYPE(Z_32) /* wzr */ \
272 BASIC_REG_TYPE(Z_64) /* xzr */ \
273 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
274 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
275 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
276 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
277 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
278 BASIC_REG_TYPE(VN) /* v[0-31] */ \
279 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
280 BASIC_REG_TYPE(PN) /* p[0-15] */ \
281 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
282 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
283 /* Typecheck: same, plus SVE registers. */ \
284 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
285 | REG_TYPE(ZN)) \
286 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
287 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
289 /* Typecheck: same, plus SVE registers. */ \
290 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
291 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
292 | REG_TYPE(ZN)) \
293 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
294 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
295 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
296 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
297 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
298 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
299 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
300 /* Typecheck: any [BHSDQ]P FP. */ \
301 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
302 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
303 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
304 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
306 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
307 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
308 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
309 be used for SVE instructions, since Zn and Pn are valid symbols \
310 in other contexts. */ \
311 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
313 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
314 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
315 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
316 | REG_TYPE(ZN) | REG_TYPE(PN)) \
317 /* Any integer register; used for error messages only. */ \
318 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
320 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
321 /* Pseudo type to mark the end of the enumerator sequence. */ \
322 BASIC_REG_TYPE(MAX)
323
324 #undef BASIC_REG_TYPE
325 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
326 #undef MULTI_REG_TYPE
327 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
328
329 /* Register type enumerators. */
330 typedef enum aarch64_reg_type_
331 {
332 /* A list of REG_TYPE_*. */
333 AARCH64_REG_TYPES
334 } aarch64_reg_type;
335
336 #undef BASIC_REG_TYPE
337 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
338 #undef REG_TYPE
339 #define REG_TYPE(T) (1 << REG_TYPE_##T)
340 #undef MULTI_REG_TYPE
341 #define MULTI_REG_TYPE(T,V) V,
342
/* Structure for a hash table entry for a register.  */
typedef struct
{
  /* Register name as written in assembly; the hash key.  */
  const char *name;
  /* Register number within its bank.  */
  unsigned char number;
  /* One of the REG_TYPE_* enumerators.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;
  /* NOTE(review): presumably non-zero for built-in registers as opposed
     to user-defined aliases — confirm against where entries are added.  */
  unsigned char builtin;
} reg_entry;
351
352 /* Values indexed by aarch64_reg_type to assist the type checking. */
353 static const unsigned reg_type_masks[] =
354 {
355 AARCH64_REG_TYPES
356 };
357
358 #undef BASIC_REG_TYPE
359 #undef REG_TYPE
360 #undef MULTI_REG_TYPE
361 #undef AARCH64_REG_TYPES
362
363 /* Diagnostics used when we don't get a register of the expected type.
364 Note: this has to synchronized with aarch64_reg_type definitions
365 above. */
366 static const char *
367 get_reg_expected_msg (aarch64_reg_type reg_type)
368 {
369 const char *msg;
370
371 switch (reg_type)
372 {
373 case REG_TYPE_R_32:
374 msg = N_("integer 32-bit register expected");
375 break;
376 case REG_TYPE_R_64:
377 msg = N_("integer 64-bit register expected");
378 break;
379 case REG_TYPE_R_N:
380 msg = N_("integer register expected");
381 break;
382 case REG_TYPE_R64_SP:
383 msg = N_("64-bit integer or SP register expected");
384 break;
385 case REG_TYPE_SVE_BASE:
386 msg = N_("base register expected");
387 break;
388 case REG_TYPE_R_Z:
389 msg = N_("integer or zero register expected");
390 break;
391 case REG_TYPE_SVE_OFFSET:
392 msg = N_("offset register expected");
393 break;
394 case REG_TYPE_R_SP:
395 msg = N_("integer or SP register expected");
396 break;
397 case REG_TYPE_R_Z_SP:
398 msg = N_("integer, zero or SP register expected");
399 break;
400 case REG_TYPE_FP_B:
401 msg = N_("8-bit SIMD scalar register expected");
402 break;
403 case REG_TYPE_FP_H:
404 msg = N_("16-bit SIMD scalar or floating-point half precision "
405 "register expected");
406 break;
407 case REG_TYPE_FP_S:
408 msg = N_("32-bit SIMD scalar or floating-point single precision "
409 "register expected");
410 break;
411 case REG_TYPE_FP_D:
412 msg = N_("64-bit SIMD scalar or floating-point double precision "
413 "register expected");
414 break;
415 case REG_TYPE_FP_Q:
416 msg = N_("128-bit SIMD scalar or floating-point quad precision "
417 "register expected");
418 break;
419 case REG_TYPE_R_Z_BHSDQ_V:
420 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
421 msg = N_("register expected");
422 break;
423 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
424 msg = N_("SIMD scalar or floating-point register expected");
425 break;
426 case REG_TYPE_VN: /* any V reg */
427 msg = N_("vector register expected");
428 break;
429 case REG_TYPE_ZN:
430 msg = N_("SVE vector register expected");
431 break;
432 case REG_TYPE_PN:
433 msg = N_("SVE predicate register expected");
434 break;
435 default:
436 as_fatal (_("invalid register type %d"), reg_type);
437 }
438 return msg;
439 }
440
441 /* Some well known registers that we refer to directly elsewhere. */
442 #define REG_SP 31
443 #define REG_ZR 31
444
445 /* Instructions take 4 bytes in the object file. */
446 #define INSN_SIZE 4
447
448 static htab_t aarch64_ops_hsh;
449 static htab_t aarch64_cond_hsh;
450 static htab_t aarch64_shift_hsh;
451 static htab_t aarch64_sys_regs_hsh;
452 static htab_t aarch64_pstatefield_hsh;
453 static htab_t aarch64_sys_regs_ic_hsh;
454 static htab_t aarch64_sys_regs_dc_hsh;
455 static htab_t aarch64_sys_regs_at_hsh;
456 static htab_t aarch64_sys_regs_tlbi_hsh;
457 static htab_t aarch64_sys_regs_sr_hsh;
458 static htab_t aarch64_reg_hsh;
459 static htab_t aarch64_barrier_opt_hsh;
460 static htab_t aarch64_nzcv_hsh;
461 static htab_t aarch64_pldop_hsh;
462 static htab_t aarch64_hint_opt_hsh;
463
464 /* Stuff needed to resolve the label ambiguity
465 As:
466 ...
467 label: <insn>
468 may differ from:
469 ...
470 label:
471 <insn> */
472
473 static symbolS *last_label_seen;
474
475 /* Literal pool structure. Held on a per-section
476 and per-sub-section basis. */
477
478 #define MAX_LITERAL_POOL_SIZE 1024
479 typedef struct literal_expression
480 {
481 expressionS exp;
482 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
483 LITTLENUM_TYPE * bignum;
484 } literal_expression;
485
486 typedef struct literal_pool
487 {
488 literal_expression literals[MAX_LITERAL_POOL_SIZE];
489 unsigned int next_free_entry;
490 unsigned int id;
491 symbolS *symbol;
492 segT section;
493 subsegT sub_section;
494 int size;
495 struct literal_pool *next;
496 } literal_pool;
497
498 /* Pointer to a linked list of literal pools. */
499 static literal_pool *list_of_pools = NULL;
500 \f
501 /* Pure syntax. */
502
503 /* This array holds the chars that always start a comment. If the
504 pre-processor is disabled, these aren't very useful. */
505 const char comment_chars[] = "";
506
507 /* This array holds the chars that only start a comment at the beginning of
508 a line. If the line seems to have the form '# 123 filename'
509 .line and .file directives will appear in the pre-processed output. */
510 /* Note that input_file.c hand checks for '#' at the beginning of the
511 first line of the input file. This is because the compiler outputs
512 #NO_APP at the beginning of its output. */
513 /* Also note that comments like this one will always work. */
514 const char line_comment_chars[] = "#";
515
516 const char line_separator_chars[] = ";";
517
518 /* Chars that can be used to separate mant
519 from exp in floating point numbers. */
520 const char EXP_CHARS[] = "eE";
521
522 /* Chars that mean this number is a floating point constant. */
523 /* As in 0f12.456 */
524 /* or 0d1.2345e12 */
525
526 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
527
528 /* Prefix character that indicates the start of an immediate value. */
529 #define is_immediate_prefix(C) ((C) == '#')
530
531 /* Separator character handling. */
532
533 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
534
/* If the next character in *STR is C, consume it and return TRUE;
   otherwise leave *STR untouched and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
546
547 #define skip_past_comma(str) skip_past_char (str, ',')
548
549 /* Arithmetic expressions (possibly involving symbols). */
550
551 static bool in_aarch64_get_expression = false;
552
553 /* Third argument to aarch64_get_expression. */
554 #define GE_NO_PREFIX false
555 #define GE_OPT_PREFIX true
556
557 /* Fourth argument to aarch64_get_expression. */
558 #define ALLOW_ABSENT false
559 #define REJECT_ABSENT true
560
561 /* Fifth argument to aarch64_get_expression. */
562 #define NORMAL_RESOLUTION false
563
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.
   If DEFER_RESOLUTION is true, then do not resolve expressions against
   constant symbols.  Necessary if the expression is part of a fixup
   that uses a reloc that must be emitted.  */

static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent,
			bool defer_resolution)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression parser works on input_line_pointer, so
     temporarily redirect it to *STR and restore it afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Flag for md_operand so that it marks bad expressions as O_illegal
     only while we are inside this function.  */
  in_aarch64_get_expression = true;
  if (defer_resolution)
    seg = deferred_expression (ep);
  else
    seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
637
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Defer entirely to the generic IEEE encoder; target_big_endian
     selects the byte order of the emitted littlenums.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
648
649 /* We handle all bad expressions here, so that we can report the faulty
650 instruction in the error message. */
651 void
652 md_operand (expressionS * exp)
653 {
654 if (in_aarch64_get_expression)
655 exp->X_op = O_illegal;
656 }
657
658 /* Immediate values. */
659
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is
   set will be the most meaningful.  Avoid overwriting it with later
   (cascading) errors by calling this function.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
671
/* Similar to first_error, but this function accepts a formatted error
   message; like first_error it only records the message when no earlier
   error has been set.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single static buffer will not cause error messages for
     different instructions to pollute each other; this is because at the
     end of processing of each assembly line, the error message if any
     will be collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The message is expected to fit; vsnprintf truncation would
	 indicate a caller bug.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
696
697 /* Register parsing. */
698
699 /* Generic register parser which is called by other specialized
700 register parsers.
701 CCP points to what should be the beginning of a register name.
702 If it is indeed a valid register name, advance CCP over it and
703 return the reg_entry structure; otherwise return NULL.
704 It does not issue diagnostics. */
705
706 static reg_entry *
707 parse_reg (char **ccp)
708 {
709 char *start = *ccp;
710 char *p;
711 reg_entry *reg;
712
713 #ifdef REGISTER_PREFIX
714 if (*start != REGISTER_PREFIX)
715 return NULL;
716 start++;
717 #endif
718
719 p = start;
720 if (!ISALPHA (*p) || !is_name_beginner (*p))
721 return NULL;
722
723 do
724 p++;
725 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
726
727 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
728
729 if (!reg)
730 return NULL;
731
732 *ccp = p;
733 return reg;
734 }
735
736 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
737 return FALSE. */
738 static bool
739 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
740 {
741 return (reg_type_masks[type] & (1 << reg->type)) != 0;
742 }
743
744 /* Try to parse a base or offset register. Allow SVE base and offset
745 registers if REG_TYPE includes SVE registers. Return the register
746 entry on success, setting *QUALIFIER to the register qualifier.
747 Return null otherwise.
748
749 Note that this function does not issue any diagnostics. */
750
751 static const reg_entry *
752 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
753 aarch64_opnd_qualifier_t *qualifier)
754 {
755 char *str = *ccp;
756 const reg_entry *reg = parse_reg (&str);
757
758 if (reg == NULL)
759 return NULL;
760
761 switch (reg->type)
762 {
763 case REG_TYPE_R_32:
764 case REG_TYPE_SP_32:
765 case REG_TYPE_Z_32:
766 *qualifier = AARCH64_OPND_QLF_W;
767 break;
768
769 case REG_TYPE_R_64:
770 case REG_TYPE_SP_64:
771 case REG_TYPE_Z_64:
772 *qualifier = AARCH64_OPND_QLF_X;
773 break;
774
775 case REG_TYPE_ZN:
776 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
777 || str[0] != '.')
778 return NULL;
779 switch (TOLOWER (str[1]))
780 {
781 case 's':
782 *qualifier = AARCH64_OPND_QLF_S_S;
783 break;
784 case 'd':
785 *qualifier = AARCH64_OPND_QLF_S_D;
786 break;
787 default:
788 return NULL;
789 }
790 str += 2;
791 break;
792
793 default:
794 return NULL;
795 }
796
797 *ccp = str;
798
799 return reg;
800 }
801
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept any 32/64-bit integer register, including SP/WSP and the
     zero registers, but no SVE registers.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
813
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector/predicate registers take no element count; WIDTH == 0
     also covers plain "b h s d q" suffixes with no count given.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are only valid for SVE registers or a count of 1.  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* Only 64-bit and 128-bit total vector shapes are accepted, plus the
     2h and 4b half-width forms; WIDTH == 0 (no count) always passes.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return true;
}
899
900 /* *STR contains an SVE zero/merge predication suffix. Parse it into
901 *PARSED_TYPE and point *STR at the end of the suffix. */
902
903 static bool
904 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
905 {
906 char *ptr = *str;
907
908 /* Skip '/'. */
909 gas_assert (*ptr == '/');
910 ptr++;
911 switch (TOLOWER (*ptr))
912 {
913 case 'z':
914 parsed_type->type = NT_zero;
915 break;
916 case 'm':
917 parsed_type->type = NT_merge;
918 break;
919 default:
920 if (*ptr != '\0' && *ptr != ',')
921 first_error_fmt (_("unexpected character `%c' in predication type"),
922 *ptr);
923 else
924 first_error (_("missing predication type"));
925 return false;
926 }
927 parsed_type->width = 0;
928 *str = ptr + 1;
929 return true;
930 }
931
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bool in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start with an "untyped, unindexed" shape; refined below.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the basic type actually parsed.  */
  type = reg->type;

  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  /* '/' introduces a /z or /m predication suffix (Pn only).  */
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
			      NORMAL_RESOLUTION);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1068
1069 /* Parse register.
1070
1071 Return the register number on success; return PARSE_FAIL otherwise.
1072
1073 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1074 the register (e.g. NEON double or quad reg when either has been requested).
1075
1076 If this is a NEON vector register with additional type information, fill
1077 in the struct pointed to by VECTYPE (if non-NULL).
1078
1079 This parser does not handle register list. */
1080
1081 static int
1082 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1083 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1084 {
1085 struct vector_type_el atype;
1086 char *str = *ccp;
1087 int reg = parse_typed_reg (&str, type, rtype, &atype,
1088 /*in_reg_list= */ false);
1089
1090 if (reg == PARSE_FAIL)
1091 return PARSE_FAIL;
1092
1093 if (vectype)
1094 *vectype = atype;
1095
1096 *ccp = str;
1097
1098 return reg;
1099 }
1100
1101 static inline bool
1102 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1103 {
1104 return
1105 e1.type == e2.type
1106 && e1.defined == e2.defined
1107 && e1.width == e2.width && e1.index == e2.index;
1108 }
1109
1110 /* This function parses a list of vector registers of type TYPE.
1111 On success, it returns the parsed register list information in the
1112 following encoded format:
1113
1114 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1115 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1116
1117 The information of the register shape and/or index is returned in
1118 *VECTYPE.
1119
1120 It returns PARSE_FAIL if the register list is invalid.
1121
1122 The list contains one to four registers.
1123 Each register can be one of:
1124 <Vt>.<T>[<index>]
1125 <Vt>.<T>
1126 All <T> should be identical.
1127 All <index> should be identical.
1128 There are restrictions on <Vt> numbers which are checked later
1129 (by reg_list_valid_p). */
1130
static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;

  /* A register list always starts with an opening brace.  */
  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  /* TYPEINFO_FIRST records the shape of the first register; every later
     register must match it (checked via eq_vector_type_el below).  */
  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  /* We are at the second endpoint of a "Vm-Vn" range;
	     VAL_RANGE holds the first endpoint.  */
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ true);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      /* If any register carries an element index, the whole list must
	 be followed by "[index]"; remember that for later.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* Ranges must be ascending, e.g. {v0.4s-v3.4s}.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Pack each register number into the next 5-bit slot of the
	 encoded result (see the function header for the layout).  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue after a comma, or after a '-' (range); note the comma
     operator sets IN_RANGE before testing for '-'.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* Parse the trailing "[index]" shared by all elements, if one was
     promised by the per-register types above.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
				  NORMAL_RESOLUTION);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  /* The architecture allows lists of one to four registers.  */
  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  /* *CCP is advanced even on error so the caller can report a sensible
     position; *VECTYPE is only written on success.  */
  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1271
1272 /* Directives: register aliases. */
1273
1274 static reg_entry *
1275 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1276 {
1277 reg_entry *new;
1278 const char *name;
1279
1280 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1281 {
1282 if (new->builtin)
1283 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1284 str);
1285
1286 /* Only warn about a redefinition if it's not defined as the
1287 same register. */
1288 else if (new->number != number || new->type != type)
1289 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1290
1291 return NULL;
1292 }
1293
1294 name = xstrdup (str);
1295 new = XNEW (reg_entry);
1296
1297 new->name = name;
1298 new->number = number;
1299 new->type = type;
1300 new->builtin = false;
1301
1302 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1303
1304 return new;
1305 }
1306
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.

   NEWNAME is the candidate alias; P points just past it, at what should
   be the " .req " marker.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  /* Skip the " .req " marker to reach the existing register name.  */
  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only create the upper-case alias if it differs from the name
	 as originally written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise for the all-lower-case variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1386
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line; if it is reached,
   the directive was used with the wrong syntax, so diagnose that.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1394
/* The .unreq directive deletes an alias which was previously defined
   by .req.  For example:

       my_alias .req r11
       .unreq my_alias

   The matching upper-case and lower-case variants created by
   create_register_alias are removed as well.  */

static void
s_unreq (int a ATTRIBUTE_UNUSED)
{
  char *name;
  char saved_char;

  name = input_line_pointer;

  /* Find the end of the alias name on the input line.  */
  while (*input_line_pointer != 0
	 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
    ++input_line_pointer;

  /* Temporarily NUL-terminate the name in place; restored below.  */
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  if (!*name)
    as_bad (_("invalid syntax for .unreq directive"));
  else
    {
      reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);

      if (!reg)
	as_bad (_("unknown register alias '%s'"), name);
      else if (reg->builtin)
	as_warn (_("ignoring attempt to undefine built-in register '%s'"),
		 name);
      else
	{
	  char *p;
	  char *nbuf;

	  str_hash_delete (aarch64_reg_hsh, name);
	  free ((char *) reg->name);
	  free (reg);

	  /* Also locate the all upper case and all lower case versions.
	     Do not complain if we cannot find one or the other as it
	     was probably deleted above.  */

	  nbuf = strdup (name);
	  for (p = nbuf; *p; p++)
	    *p = TOUPPER (*p);
	  reg = str_hash_find (aarch64_reg_hsh, nbuf);
	  if (reg)
	    {
	      str_hash_delete (aarch64_reg_hsh, nbuf);
	      free ((char *) reg->name);
	      free (reg);
	    }

	  for (p = nbuf; *p; p++)
	    *p = TOLOWER (*p);
	  reg = str_hash_find (aarch64_reg_hsh, nbuf);
	  if (reg)
	    {
	      str_hash_delete (aarch64_reg_hsh, nbuf);
	      free ((char *) reg->name);
	      free (reg);
	    }

	  free (nbuf);
	}
    }

  /* Restore the input line and insist nothing follows the name.  */
  *input_line_pointer = saved_char;
  demand_empty_rest_of_line ();
}
1468
1469 /* Directives: Instruction set selection. */
1470
1471 #ifdef OBJ_ELF
1472 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1473 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1474 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1475 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1476
/* Create a new mapping symbol ($x for code, $d for data) for the
   transition to STATE, at offset VALUE within FRAG.  Also maintains
   the per-frag first_map/last_map bookkeeping used later by
   check_mapping_symbols.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag are created in address order;
	 an equal address means the old symbol is superseded.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1532
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address: emit a $d
   symbol at VALUE and a STATE symbol BYTES further on, replacing any
   mapping symbol already recorded at VALUE.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The removed symbol was also the frag's first; clear that
	     record so make_mapping_symbol can re-establish it.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1560
1561 static void mapping_state_2 (enum mstate state, int max_chars);
1562
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
			     || (frag_now_fix () > 0);

      if (add_symbol)
	/* Code preceded by untagged bytes: mark those bytes as data.  */
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1604
1605 /* Same as mapping_state, but MAX_CHARS bytes have already been
1606 allocated. Put the mapping symbol that far back. */
1607
1608 static void
1609 mapping_state_2 (enum mstate state, int max_chars)
1610 {
1611 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1612
1613 if (!SEG_NORMAL (now_seg))
1614 return;
1615
1616 if (mapstate == state)
1617 /* The mapping symbol has already been emitted.
1618 There is nothing else to do. */
1619 return;
1620
1621 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1622 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1623 }
1624 #else
1625 #define mapping_state(x) /* nothing */
1626 #define mapping_state_2(x, y) /* nothing */
1627 #endif
1628
1629 /* Directives: sectioning and alignment. */
1630
/* Implement the .bss directive: switch to the BSS section and mark
   subsequent output as data for mapping-symbol purposes.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1640
/* Implement the .even directive: align the output to a 2-byte
   boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* Record the alignment so the section itself is at least 2-aligned.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1652
1653 /* Directives: Literal pools. */
1654
1655 static literal_pool *
1656 find_literal_pool (int size)
1657 {
1658 literal_pool *pool;
1659
1660 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1661 {
1662 if (pool->section == now_seg
1663 && pool->sub_section == now_subseg && pool->size == size)
1664 break;
1665 }
1666
1667 return pool;
1668 }
1669
/* Return the literal pool for SIZE-byte entries in the current
   (sub)section, creating (and registering) one if necessary.  Ensures
   the pool has a label symbol and a unique id.  */
static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = XNEW (literal_pool);
      /* NOTE(review): XNEW wraps xmalloc, which presumably aborts on
	 allocation failure, so this NULL check looks unreachable --
	 confirm before relying on a NULL return here.  */
      if (!pool)
	return NULL;

      /* Currently we always put the literal pool in the current text
	 section.  If we were generating "small" model code where we
	 knew that all code and initialised data was within 1MB then
	 we could output literals to mergeable, read-only data
	 sections. */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    &zero_address_frag, 0);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}
1714
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Identical constants/symbols are shared rather than duplicated.  On
   return *EXP is rewritten to reference the pool entry (symbol +
   offset).  Return TRUE on success, otherwise return FALSE.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness ...  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* ... symbolic expressions on the full symbol/offset triple.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP to point at the (shared or new) pool slot; the offset
     is the entry index scaled by the entry size.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1774
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do:
   give the pre-created SYMBOLP its name, segment, value and frag,
   then link it onto the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its name.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1825
1826
/* Implement the .ltorg/.pool directives: dump every non-empty literal
   pool (4-byte and 8-byte entries) for the current (sub)section at the
   current location, then mark the pools as emptied.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* align == 2 handles the .word pool, align == 3 the .xword pool.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* Literal data follows; update the mapping state before emitting.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 makes the generated label impossible to collide with
	 a user symbol.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Bind the pool's placeholder symbol to the current address.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1885
1886 #ifdef OBJ_ELF
1887 /* Forward declarations for functions below, in the MD interface
1888 section. */
1889 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1890 static struct reloc_table_entry * find_reloc_table_entry (char **);
1891
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Implement .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8):
   emit a comma-separated list of NBYTES-wide data expressions,
   rejecting (for now) any ":reloc:" suffix on symbolic operands.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* This directive emits data, so update the mapping state first.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbolic operand may carry an optional "#:suffix:"
	     relocation specifier; recognise it only to reject it.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1947
/* Mark symbol that it follows a variant PCS convention, by setting
   STO_AARCH64_VARIANT_PCS in the ELF symbol's st_other field.  */

static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();
  /* Set the flag directly on the underlying ELF symbol.  */
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
1970 #endif /* OBJ_ELF */
1971
/* Output a 32-bit word, but mark as an instruction (implements the
   .inst directive).  Each operand must be a constant expression; on
   big-endian targets the value is byte-swapped so it is stored in
   instruction (little-endian) order.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      if (target_big_endian)
	{
	  /* Instructions are always little-endian; swap so emit_expr
	     stores the encoding correctly on big-endian hosts.  */
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2024
/* Implement .cfi_b_key_frame: record in the current CFI FDE that
   return addresses in this frame are signed with the B pointer
   authentication key rather than the default A key.  */
static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2032
2033 #ifdef OBJ_ELF
/* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction
   (implements the .tlsdescadd directive).  */

static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the instruction that follows.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2048
/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction
   (implements the .tlsdesccall directive).  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2068
/* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction
   (implements the .tlsdescldr directive).  */

static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the instruction that follows.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2083 #endif /* OBJ_ELF */
2084
2085 static void s_aarch64_arch (int);
2086 static void s_aarch64_cpu (int);
2087 static void s_aarch64_arch_extension (int);
2088
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  /* Sectioning and alignment.  */
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* Literal pools.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Target selection.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  /* TLS descriptor relocation markers.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* Data directives; the integer argument is the element size.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2122 \f
2123
2124 /* Check whether STR points to a register name followed by a comma or the
2125 end of line; REG_TYPE indicates which register types are checked
2126 against. Return TRUE if STR is such a register name; otherwise return
2127 FALSE. The function does not intend to produce any diagnostics, but since
2128 the register parser aarch64_reg_parse, which is called by this function,
2129 does produce diagnostics, we call clear_error to clear any diagnostics
2130 that may be generated by aarch64_reg_parse.
2131 Also, the function returns FALSE directly if there is any user error
2132 present at the function entry. This prevents the existing diagnostics
2133 state from being spoiled.
2134 The function currently serves parse_constant_immediate and
2135 parse_big_immediate only. */
2136 static bool
2137 reg_name_p (char *str, aarch64_reg_type reg_type)
2138 {
2139 int reg;
2140
2141 /* Prevent the diagnostics state from being spoiled. */
2142 if (error_p ())
2143 return false;
2144
2145 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2146
2147 /* Clear the parsing error that may be set by the reg parser. */
2148 clear_error ();
2149
2150 if (reg == PARSE_FAIL)
2151 return false;
2152
2153 skip_whitespace (str);
2154 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2155 return true;
2156
2157 return false;
2158 }
2159
2160 /* Parser functions used exclusively in instruction operands. */
2161
2162 /* Parse an immediate expression which may not be constant.
2163
2164 To prevent the expression parser from pushing a register name
2165 into the symbol table as an undefined symbol, firstly a check is
2166 done to find out whether STR is a register of type REG_TYPE followed
2167 by a comma or the end of line. Return FALSE if STR is such a string. */
2168
2169 static bool
2170 parse_immediate_expression (char **str, expressionS *exp,
2171 aarch64_reg_type reg_type)
2172 {
2173 if (reg_name_p (*str, reg_type))
2174 {
2175 set_recoverable_error (_("immediate operand required"));
2176 return false;
2177 }
2178
2179 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT,
2180 NORMAL_RESOLUTION);
2181
2182 if (exp->X_op == O_absent)
2183 {
2184 set_fatal_syntax_error (_("missing immediate expression"));
2185 return false;
2186 }
2187
2188 return true;
2189 }
2190
2191 /* Constant immediate-value read function for use in insn parsing.
2192 STR points to the beginning of the immediate (with the optional
2193 leading #); *VAL receives the value. REG_TYPE says which register
2194 names should be treated as registers rather than as symbolic immediates.
2195
2196 Return TRUE on success; otherwise return FALSE. */
2197
2198 static bool
2199 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2200 {
2201 expressionS exp;
2202
2203 if (! parse_immediate_expression (str, &exp, reg_type))
2204 return false;
2205
2206 if (exp.X_op != O_constant)
2207 {
2208 set_syntax_error (_("constant expression required"));
2209 return false;
2210 }
2211
2212 *val = exp.X_add_number;
2213 return true;
2214 }
2215
/* Extract the AArch64 8-bit FP immediate encoding from the IEEE
   single-precision bit pattern IMM: the sign bit b[31] becomes b[7]
   and bits b[25:19] become b[6:0].  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low_bits = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign_bit = (imm >> 24) & 0x80;	/* b[31] -> b[7].  */
  return low_bits | sign_bit;
}
2222
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* Such a value has the bit pattern

       3 32222222 2221111111111
       1 09876543 21098765432109876543210
       n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~ e: the low 19 mantissa bits are zero and exponent bits
     29..25 are the inverse of bit 30.  */

  /* The low 19 bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Expected value of bits 29..25, i.e. 'eeeee' == ~E.  */
  uint32_t exp_bits = (((imm >> 30) & 0x1) == 0
		       ? 0x3e000000 : 0x40000000);

  return (imm & 0x7e000000) == exp_bits;
}
2255
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.

   A convertible double has the bit pattern

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
     if Eeee_eeee != 1111_1111

   where n, e, s and S are either 0 or 1 independently and where ~ is the
   inverse of E.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;
  uint32_t expected;

  /* The bottom 29 significand bits would be discarded; they must be 0.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* Bits 62-59 must be E followed by three copies of ~E.  */
  expected = ((high32 >> 30) & 0x1) ? 0x40000000 : 0x38000000;
  if ((high32 & 0x78000000) != expected)
    return false;

  /* Reject exponents that would map to the float all-ones exponent.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (high32 & 0xc0000000)	      /* 1 n bit and 1 E bit.  */
	    | ((high32 << 3) & 0x3ffffff8)    /* 7 e and 20 s bits.  */
	    | (low32 >> 29);		      /* 3 S bits.  */
  return true;
}
2303
2304 /* Return true if we should treat OPERAND as a double-precision
2305 floating-point operand rather than a single-precision one. */
2306 static bool
2307 double_precision_operand_p (const aarch64_opnd_info *operand)
2308 {
2309 /* Check for unsuffixed SVE registers, which are allowed
2310 for LDR and STR but not in instructions that require an
2311 immediate. We get better error messages if we arbitrarily
2312 pick one size, parse the immediate normally, and then
2313 report the match failure in the normal way. */
2314 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2315 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2316 }
2317
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* The immediate prefix '#' is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* A double-precision encoding must be exactly convertible to
	     a single-precision one.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision encoding must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* Recoverable: the caller may retry this operand as a register.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Parse a decimal floating-point literal into littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP); assemble it
	 from the littlenums, most significant first.  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2393
/* Less-generic immediate-value read function with the possibility of loading
   a big (64-bit) immediate, as required by AdvSIMD Modified immediate
   instructions.

   To prevent the expression parser from pushing a register name into the
   symbol table as an undefined symbol, a check is firstly done to find
   out whether STR is a register of type REG_TYPE followed by a comma or
   the end of line.  Return FALSE if STR is such a register.

   On success the parsed expression is left in inst.reloc.exp; *IMM is
   only written when the expression resolved to a constant.  */

static bool
parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
{
  char *ptr = *str;

  if (reg_name_p (ptr, reg_type))
    {
      set_syntax_error (_("immediate operand required"));
      return false;
    }

  /* Record the expression in the global reloc slot; non-constant
     expressions are handled by later fixup processing.  */
  aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT,
			  NORMAL_RESOLUTION);

  if (inst.reloc.exp.X_op == O_constant)
    *imm = inst.reloc.exp.X_add_number;

  *str = ptr;

  return true;
}
2424
2425 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2426 if NEED_LIBOPCODES is non-zero, the fixup will need
2427 assistance from the libopcodes. */
2428
2429 static inline void
2430 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2431 const aarch64_opnd_info *operand,
2432 int need_libopcodes_p)
2433 {
2434 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2435 reloc->opnd = operand->type;
2436 if (need_libopcodes_p)
2437 reloc->need_libopcodes_p = 1;
2438 };
2439
2440 /* Return TRUE if the instruction needs to be fixed up later internally by
2441 the GAS; otherwise return FALSE. */
2442
2443 static inline bool
2444 aarch64_gas_internal_fixup_p (void)
2445 {
2446 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2447 }
2448
2449 /* Assign the immediate value to the relevant field in *OPERAND if
2450 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2451 needs an internal fixup in a later stage.
2452 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2453 IMM.VALUE that may get assigned with the constant. */
2454 static inline void
2455 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2456 aarch64_opnd_info *operand,
2457 int addr_off_p,
2458 int need_libopcodes_p,
2459 int skip_p)
2460 {
2461 if (reloc->exp.X_op == O_constant)
2462 {
2463 if (addr_off_p)
2464 operand->addr.offset.imm = reloc->exp.X_add_number;
2465 else
2466 operand->imm.value = reloc->exp.X_add_number;
2467 reloc->type = BFD_RELOC_UNUSED;
2468 }
2469 else
2470 {
2471 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2472 /* Tell libopcodes to ignore this operand or not. This is helpful
2473 when one of the operands needs to be fixed up later but we need
2474 libopcodes to check the other operands. */
2475 operand->skip = skip_p;
2476 }
2477 }
2478
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  /* Modifier name, without the surrounding colons.  */
  const char *name;
  /* Non-zero if the resulting relocation is PC-relative.  */
  int pc_rel;
  /* Relocation to emit for each instruction class; a zero value means
     the modifier is not allowed with that class.  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};
2498
2499 static struct reloc_table_entry reloc_table[] =
2500 {
2501 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2502 {"lo12", 0,
2503 0, /* adr_type */
2504 0,
2505 0,
2506 BFD_RELOC_AARCH64_ADD_LO12,
2507 BFD_RELOC_AARCH64_LDST_LO12,
2508 0},
2509
2510 /* Higher 21 bits of pc-relative page offset: ADRP */
2511 {"pg_hi21", 1,
2512 0, /* adr_type */
2513 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2514 0,
2515 0,
2516 0,
2517 0},
2518
2519 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2520 {"pg_hi21_nc", 1,
2521 0, /* adr_type */
2522 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2523 0,
2524 0,
2525 0,
2526 0},
2527
2528 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2529 {"abs_g0", 0,
2530 0, /* adr_type */
2531 0,
2532 BFD_RELOC_AARCH64_MOVW_G0,
2533 0,
2534 0,
2535 0},
2536
2537 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2538 {"abs_g0_s", 0,
2539 0, /* adr_type */
2540 0,
2541 BFD_RELOC_AARCH64_MOVW_G0_S,
2542 0,
2543 0,
2544 0},
2545
2546 /* Less significant bits 0-15 of address/value: MOVK, no check */
2547 {"abs_g0_nc", 0,
2548 0, /* adr_type */
2549 0,
2550 BFD_RELOC_AARCH64_MOVW_G0_NC,
2551 0,
2552 0,
2553 0},
2554
2555 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2556 {"abs_g1", 0,
2557 0, /* adr_type */
2558 0,
2559 BFD_RELOC_AARCH64_MOVW_G1,
2560 0,
2561 0,
2562 0},
2563
2564 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2565 {"abs_g1_s", 0,
2566 0, /* adr_type */
2567 0,
2568 BFD_RELOC_AARCH64_MOVW_G1_S,
2569 0,
2570 0,
2571 0},
2572
2573 /* Less significant bits 16-31 of address/value: MOVK, no check */
2574 {"abs_g1_nc", 0,
2575 0, /* adr_type */
2576 0,
2577 BFD_RELOC_AARCH64_MOVW_G1_NC,
2578 0,
2579 0,
2580 0},
2581
2582 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2583 {"abs_g2", 0,
2584 0, /* adr_type */
2585 0,
2586 BFD_RELOC_AARCH64_MOVW_G2,
2587 0,
2588 0,
2589 0},
2590
2591 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2592 {"abs_g2_s", 0,
2593 0, /* adr_type */
2594 0,
2595 BFD_RELOC_AARCH64_MOVW_G2_S,
2596 0,
2597 0,
2598 0},
2599
2600 /* Less significant bits 32-47 of address/value: MOVK, no check */
2601 {"abs_g2_nc", 0,
2602 0, /* adr_type */
2603 0,
2604 BFD_RELOC_AARCH64_MOVW_G2_NC,
2605 0,
2606 0,
2607 0},
2608
2609 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2610 {"abs_g3", 0,
2611 0, /* adr_type */
2612 0,
2613 BFD_RELOC_AARCH64_MOVW_G3,
2614 0,
2615 0,
2616 0},
2617
2618 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2619 {"prel_g0", 1,
2620 0, /* adr_type */
2621 0,
2622 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2623 0,
2624 0,
2625 0},
2626
2627 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2628 {"prel_g0_nc", 1,
2629 0, /* adr_type */
2630 0,
2631 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2632 0,
2633 0,
2634 0},
2635
2636 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2637 {"prel_g1", 1,
2638 0, /* adr_type */
2639 0,
2640 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2641 0,
2642 0,
2643 0},
2644
2645 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2646 {"prel_g1_nc", 1,
2647 0, /* adr_type */
2648 0,
2649 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2650 0,
2651 0,
2652 0},
2653
2654 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2655 {"prel_g2", 1,
2656 0, /* adr_type */
2657 0,
2658 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2659 0,
2660 0,
2661 0},
2662
2663 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2664 {"prel_g2_nc", 1,
2665 0, /* adr_type */
2666 0,
2667 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2668 0,
2669 0,
2670 0},
2671
2672 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2673 {"prel_g3", 1,
2674 0, /* adr_type */
2675 0,
2676 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2677 0,
2678 0,
2679 0},
2680
2681 /* Get to the page containing GOT entry for a symbol. */
2682 {"got", 1,
2683 0, /* adr_type */
2684 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2685 0,
2686 0,
2687 0,
2688 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2689
2690 /* 12 bit offset into the page containing GOT entry for that symbol. */
2691 {"got_lo12", 0,
2692 0, /* adr_type */
2693 0,
2694 0,
2695 0,
2696 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2697 0},
2698
2699 /* 0-15 bits of address/value: MOVk, no check. */
2700 {"gotoff_g0_nc", 0,
2701 0, /* adr_type */
2702 0,
2703 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2704 0,
2705 0,
2706 0},
2707
2708 /* Most significant bits 16-31 of address/value: MOVZ. */
2709 {"gotoff_g1", 0,
2710 0, /* adr_type */
2711 0,
2712 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2713 0,
2714 0,
2715 0},
2716
2717 /* 15 bit offset into the page containing GOT entry for that symbol. */
2718 {"gotoff_lo15", 0,
2719 0, /* adr_type */
2720 0,
2721 0,
2722 0,
2723 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2724 0},
2725
2726 /* Get to the page containing GOT TLS entry for a symbol */
2727 {"gottprel_g0_nc", 0,
2728 0, /* adr_type */
2729 0,
2730 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2731 0,
2732 0,
2733 0},
2734
2735 /* Get to the page containing GOT TLS entry for a symbol */
2736 {"gottprel_g1", 0,
2737 0, /* adr_type */
2738 0,
2739 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2740 0,
2741 0,
2742 0},
2743
2744 /* Get to the page containing GOT TLS entry for a symbol */
2745 {"tlsgd", 0,
2746 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2747 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2748 0,
2749 0,
2750 0,
2751 0},
2752
2753 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2754 {"tlsgd_lo12", 0,
2755 0, /* adr_type */
2756 0,
2757 0,
2758 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2759 0,
2760 0},
2761
2762 /* Lower 16 bits address/value: MOVk. */
2763 {"tlsgd_g0_nc", 0,
2764 0, /* adr_type */
2765 0,
2766 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2767 0,
2768 0,
2769 0},
2770
2771 /* Most significant bits 16-31 of address/value: MOVZ. */
2772 {"tlsgd_g1", 0,
2773 0, /* adr_type */
2774 0,
2775 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2776 0,
2777 0,
2778 0},
2779
2780 /* Get to the page containing GOT TLS entry for a symbol */
2781 {"tlsdesc", 0,
2782 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2783 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2784 0,
2785 0,
2786 0,
2787 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2788
2789 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2790 {"tlsdesc_lo12", 0,
2791 0, /* adr_type */
2792 0,
2793 0,
2794 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2795 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2796 0},
2797
2798 /* Get to the page containing GOT TLS entry for a symbol.
2799 The same as GD, we allocate two consecutive GOT slots
2800 for module index and module offset, the only difference
2801 with GD is the module offset should be initialized to
2802 zero without any outstanding runtime relocation. */
2803 {"tlsldm", 0,
2804 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2805 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2806 0,
2807 0,
2808 0,
2809 0},
2810
2811 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2812 {"tlsldm_lo12_nc", 0,
2813 0, /* adr_type */
2814 0,
2815 0,
2816 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2817 0,
2818 0},
2819
2820 /* 12 bit offset into the module TLS base address. */
2821 {"dtprel_lo12", 0,
2822 0, /* adr_type */
2823 0,
2824 0,
2825 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2826 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2827 0},
2828
2829 /* Same as dtprel_lo12, no overflow check. */
2830 {"dtprel_lo12_nc", 0,
2831 0, /* adr_type */
2832 0,
2833 0,
2834 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2835 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2836 0},
2837
2838 /* bits[23:12] of offset to the module TLS base address. */
2839 {"dtprel_hi12", 0,
2840 0, /* adr_type */
2841 0,
2842 0,
2843 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2844 0,
2845 0},
2846
2847 /* bits[15:0] of offset to the module TLS base address. */
2848 {"dtprel_g0", 0,
2849 0, /* adr_type */
2850 0,
2851 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2852 0,
2853 0,
2854 0},
2855
2856 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2857 {"dtprel_g0_nc", 0,
2858 0, /* adr_type */
2859 0,
2860 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2861 0,
2862 0,
2863 0},
2864
2865 /* bits[31:16] of offset to the module TLS base address. */
2866 {"dtprel_g1", 0,
2867 0, /* adr_type */
2868 0,
2869 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2870 0,
2871 0,
2872 0},
2873
2874 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2875 {"dtprel_g1_nc", 0,
2876 0, /* adr_type */
2877 0,
2878 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2879 0,
2880 0,
2881 0},
2882
2883 /* bits[47:32] of offset to the module TLS base address. */
2884 {"dtprel_g2", 0,
2885 0, /* adr_type */
2886 0,
2887 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2888 0,
2889 0,
2890 0},
2891
2892 /* Lower 16 bit offset into GOT entry for a symbol */
2893 {"tlsdesc_off_g0_nc", 0,
2894 0, /* adr_type */
2895 0,
2896 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2897 0,
2898 0,
2899 0},
2900
2901 /* Higher 16 bit offset into GOT entry for a symbol */
2902 {"tlsdesc_off_g1", 0,
2903 0, /* adr_type */
2904 0,
2905 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2906 0,
2907 0,
2908 0},
2909
2910 /* Get to the page containing GOT TLS entry for a symbol */
2911 {"gottprel", 0,
2912 0, /* adr_type */
2913 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2914 0,
2915 0,
2916 0,
2917 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2918
2919 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2920 {"gottprel_lo12", 0,
2921 0, /* adr_type */
2922 0,
2923 0,
2924 0,
2925 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2926 0},
2927
2928 /* Get tp offset for a symbol. */
2929 {"tprel", 0,
2930 0, /* adr_type */
2931 0,
2932 0,
2933 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2934 0,
2935 0},
2936
2937 /* Get tp offset for a symbol. */
2938 {"tprel_lo12", 0,
2939 0, /* adr_type */
2940 0,
2941 0,
2942 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2943 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2944 0},
2945
2946 /* Get tp offset for a symbol. */
2947 {"tprel_hi12", 0,
2948 0, /* adr_type */
2949 0,
2950 0,
2951 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2952 0,
2953 0},
2954
2955 /* Get tp offset for a symbol. */
2956 {"tprel_lo12_nc", 0,
2957 0, /* adr_type */
2958 0,
2959 0,
2960 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2961 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2962 0},
2963
2964 /* Most significant bits 32-47 of address/value: MOVZ. */
2965 {"tprel_g2", 0,
2966 0, /* adr_type */
2967 0,
2968 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2969 0,
2970 0,
2971 0},
2972
2973 /* Most significant bits 16-31 of address/value: MOVZ. */
2974 {"tprel_g1", 0,
2975 0, /* adr_type */
2976 0,
2977 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2978 0,
2979 0,
2980 0},
2981
2982 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2983 {"tprel_g1_nc", 0,
2984 0, /* adr_type */
2985 0,
2986 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2987 0,
2988 0,
2989 0},
2990
2991 /* Most significant bits 0-15 of address/value: MOVZ. */
2992 {"tprel_g0", 0,
2993 0, /* adr_type */
2994 0,
2995 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2996 0,
2997 0,
2998 0},
2999
3000 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3001 {"tprel_g0_nc", 0,
3002 0, /* adr_type */
3003 0,
3004 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3005 0,
3006 0,
3007 0},
3008
3009 /* 15bit offset from got entry to base address of GOT table. */
3010 {"gotpage_lo15", 0,
3011 0,
3012 0,
3013 0,
3014 0,
3015 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3016 0},
3017
3018 /* 14bit offset from got entry to base address of GOT table. */
3019 {"gotpage_lo14", 0,
3020 0,
3021 0,
3022 0,
3023 0,
3024 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3025 0},
3026 };
3027
3028 /* Given the address of a pointer pointing to the textual name of a
3029 relocation as may appear in assembler source, attempt to find its
3030 details in reloc_table. The pointer will be updated to the character
3031 after the trailing colon. On failure, NULL will be returned;
3032 otherwise return the reloc_table_entry. */
3033
3034 static struct reloc_table_entry *
3035 find_reloc_table_entry (char **str)
3036 {
3037 unsigned int i;
3038 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3039 {
3040 int length = strlen (reloc_table[i].name);
3041
3042 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3043 && (*str)[length] == ':')
3044 {
3045 *str += (length + 1);
3046 return &reloc_table[i];
3047 }
3048 }
3049
3050 return NULL;
3051 }
3052
/* Classify relocation TYPE for aarch64_force_relocation.
   Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK.  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* No opinion; let the generic code decide.  */
      return -1;
    }
}
3150
3151 int
3152 aarch64_force_relocation (struct fix *fixp)
3153 {
3154 int res = aarch64_force_reloc (fixp->fx_r_type);
3155
3156 if (res == -1)
3157 return generic_force_reloc (fixp);
3158 return res;
3159 }
3160
/* Mode argument to parse_shift and parse_shifter_operand, selecting
   which shift/extend syntaxes are acceptable in the current context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3175
/* Parse a <shift> operator on an AArch64 data processing instruction.
   MODE restricts which shift/extend operators are legal in this
   context (see enum parse_shift_mode).  On success the parsed kind and
   amount are recorded in OPERAND->shifter and *STR is advanced past
   the consumed text.
   Return TRUE on success; otherwise return FALSE.  */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan the alphabetic operator name ("lsl", "uxtw", "mul", ...).  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the name up in the shift-operator hash table.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only valid where SHIFTED_LSL_MSL is expected.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  /* MUL is only valid in the two MUL modes.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Mode-specific restrictions on the parsed operator kind.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  The amount is optional for register-offset
     addressing (a following ']') and absent for MUL VL.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      (void) aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT,
				     NORMAL_RESOLUTION);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* A missing amount is only permitted for extend operators written
	 without a '#' prefix (e.g. bare "uxtw").  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3349
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   The expression is recorded in inst.reloc.exp; validation of immediate
   operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the two immediate-accepting modes make sense here.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT, NORMAL_RESOLUTION))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Not accept any shifter for logical immediate values.
     NOTE(review): if a comma is consumed but parse_shift then fails,
     this condition is false and we fall through to success with the
     comma consumed — presumably relying on later operand checks;
     confirm this is intended.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3391
/* Parse a <shifter_operand> for a data processing instruction:

      <Rm>
      <Rm>, <shift>
      #<immediate>
      #<immediate>, LSL #imm

   where <shift> is handled by parse_shift above, and the last two
   cases are handled by the function above.

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand (char **str, aarch64_opnd_info *operand,
		       enum parse_shift_mode mode)
{
  const reg_entry *reg;
  aarch64_opnd_qualifier_t qualifier;
  enum aarch64_operand_class opd_class
    = aarch64_get_operand_class (operand->type);

  /* First try the register forms.  */
  reg = aarch64_reg_parse_32_64 (str, &qualifier);
  if (reg)
    {
      /* A register is never valid where an immediate is required.  */
      if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
	{
	  set_syntax_error (_("unexpected register in the immediate operand"));
	  return false;
	}

      if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
	{
	  set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
	  return false;
	}

      operand->reg.regno = reg->number;
      operand->qualifier = qualifier;

      /* Accept optional shift operation on register.  */
      if (! skip_past_comma (str))
	return true;

      if (! parse_shift (str, operand, mode))
	return false;

      return true;
    }
  else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
    {
      set_syntax_error
	(_("integer register expected in the extended/shifted operand "
	   "register"));
      return false;
    }

  /* We have a shifted immediate variable.  */
  return parse_shifter_operand_imm (str, operand, mode);
}
3453
/* Parse a <shifter_operand> that may be prefixed by a relocation
   modifier of the form ":rello:" or "#:rello:".  Anything without such
   a prefix is punted to parse_shifter_operand.

   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip the '#' (if present) and the ':' that introduce the
	 relocation modifier name.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT,
				    aarch64_force_reloc (entry->add_type) == 1))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3516
/* Parse all forms of an address expression.  Information is written
   to *OPERAND and/or inst.reloc.

   The A64 instruction set has the following addressing modes:

   Offset
     [base]			// in SIMD ld/st structure
     [base{,#0}]		// in ld/st exclusive
     [base{,#imm}]
     [base,Xm{,LSL #imm}]
     [base,Xm,SXTX {#imm}]
     [base,Wm,(S|U)XTW {#imm}]
   Pre-indexed
     [base]!			// in ldraa/ldrab exclusive
     [base,#imm]!
   Post-indexed
     [base],#imm
     [base],Xm			// in SIMD ld/st structure
   PC-relative (literal)
     label
   SVE:
     [base,#imm,MUL VL]
     [base,Zm.D{,LSL #imm}]
     [base,Zm.S,(S|U)XTW {#imm}]
     [base,Zm.D,(S|U)XTW {#imm}]	// ignores top 32 bits of Zm.D elements
     [Zn.S,#imm]
     [Zn.D,#imm]
     [Zn.S{, Xm}]
     [Zn.S,Zm.S{,LSL #imm}]	// in ADR
     [Zn.D,Zm.D{,LSL #imm}]	// in ADR
     [Zn.D,Zm.D,(S|U)XTW {#imm}]	// in ADR

   (As a convenience, the notation "=immediate" is permitted in conjunction
   with the pc-relative literal load instructions to automatically place an
   immediate value or symbolic address in a nearby literal pool and generate
   a hidden label which references it.)

   Upon a successful parsing, the address structure in *OPERAND will be
   filled in the following way:

     .base_regno = <base>
     .offset.is_reg	// 1 if the offset is a register
     .offset.imm = <imm>
     .offset.regno = <Rm>

   For different addressing modes defined in the A64 ISA:

   Offset
     .pcrel=0; .preind=1; .postind=0; .writeback=0
   Pre-indexed
     .pcrel=0; .preind=1; .postind=0; .writeback=1
   Post-indexed
     .pcrel=0; .preind=0; .postind=1; .writeback=1
   PC-relative (literal)
     .pcrel=1; .preind=1; .postind=0; .writeback=0

   The shift/extension information, if any, will be stored in .shifter.
   The base and offset qualifiers will be stored in *BASE_QUALIFIER and
   *OFFSET_QUALIFIER respectively, with NIL being used if there's no
   corresponding register.

   BASE_TYPE says which types of base register should be accepted and
   OFFSET_TYPE says the same for offset registers.  IMM_SHIFT_MODE
   is the type of shifter that is allowed for immediate offsets,
   or SHIFTED_NONE if none.

   In all other respects, it is the caller's responsibility to check
   for addressing modes not supported by the instruction, and to set
   inst.reloc.type.  */

static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[' means a PC-relative form: =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol> */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* Pick the reloc variant that matches the instruction.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>: */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					aarch64_force_reloc (entry->add_type) == 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr> */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				       NORMAL_RESOLUTION))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}] */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* NONE/LSL/SXTX require a 64-bit offset register...  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* ...whose element size matches the base, except for the
		 SVE2 [Zn.S, Xm] vector-plus-scalar form.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* The remaining extenders (SXTW/UXTW) take a 32-bit Wm.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					    aarch64_force_reloc (entry->add_type) == 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					    NORMAL_RESOLUTION))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					 NORMAL_RESOLUTION))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* Otherwise treat the bare [Rn] as [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
3890
3891 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3892 on success. */
3893 static bool
3894 parse_address (char **str, aarch64_opnd_info *operand)
3895 {
3896 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3897 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3898 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3899 }
3900
/* Parse an address in which SVE vector registers and MUL VL are allowed.
   The arguments have the same meaning as for parse_address_main.
   Return TRUE on success.  */
static bool
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  /* SVE base/offset register classes include Z registers, and the
     immediate offset may carry a "MUL VL" multiplier.  */
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
3913
/* Parse an operand for a MOVZ, MOVN or MOVK instruction.

   *INTERNAL_FIXUP_P is set to 1 when no relocation modifier is present
   and the immediate must be resolved by the assembler itself; it is 0
   when a :movw-style: relocation modifier was recognized (in which case
   inst.reloc.type is set from the table entry).

   Return TRUE on success; otherwise return FALSE.  */
static bool
parse_half (char **str, int *internal_fixup_p)
{
  char *p = *str;

  /* The leading '#' is optional.  */
  skip_past_char (&p, '#');

  gas_assert (internal_fixup_p);
  *internal_fixup_p = 0;

  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;

      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->movw_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      inst.reloc.type = entry->movw_type;
    }
  else
    *internal_fixup_p = 1;

  /* Parse the immediate/symbol expression itself.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				aarch64_force_reloc (inst.reloc.type) == 1))
    return false;

  *str = p;
  return true;
}
3958
/* Parse an operand for an ADRP instruction:
     ADRP <Xd>, <label>
   The label may be prefixed by a :page-style: relocation modifier;
   otherwise the default ADR_HI21 PC-relative relocation is used.
   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_adrp (char **str)
{
  char *p;

  p = *str;
  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->adrp_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      inst.reloc.type = entry->adrp_type;
    }
  else
    inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;

  /* ADRP is always PC-relative.  */
  inst.reloc.pc_rel = 1;
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				aarch64_force_reloc (inst.reloc.type) == 1))
    return false;
  *str = p;
  return true;
}
4000
/* Miscellaneous. */

/* Parse a symbolic operand such as "pow2" at *STR.  ARRAY is an array
   of SIZE tokens in which index I gives the token for field value I,
   or is null if field value I is invalid.  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   A bare integer in [0, SIZE) is also accepted in place of a token name.

   Return true on success, moving *STR past the operand and storing the
   field value in *VAL.  */

static int
parse_enum_string (char **str, int64_t *val, const char *const *array,
		   size_t size, aarch64_reg_type reg_type)
{
  expressionS exp;
  char *p, *q;
  size_t i;

  /* Match C-like tokens.  */
  p = q = *str;
  while (ISALNUM (*q))
    q++;

  /* First try a case-insensitive exact match against the token table;
     the token's index is the field value.  */
  for (i = 0; i < size; ++i)
    if (array[i]
	&& strncasecmp (array[i], p, q - p) == 0
	&& array[i][q - p] == 0)
      {
	*val = i;
	*str = q;
	return true;
      }

  /* Fall back to a numeric immediate.  */
  if (!parse_immediate_expression (&p, &exp, reg_type))
    return false;

  if (exp.X_op == O_constant
      && (uint64_t) exp.X_add_number < size)
    {
      *val = exp.X_add_number;
      *str = p;
      return true;
    }

  /* Use the default error for this operand.  */
  return false;
}
4048
4049 /* Parse an option for a preload instruction. Returns the encoding for the
4050 option, or PARSE_FAIL. */
4051
4052 static int
4053 parse_pldop (char **str)
4054 {
4055 char *p, *q;
4056 const struct aarch64_name_value_pair *o;
4057
4058 p = q = *str;
4059 while (ISALNUM (*q))
4060 q++;
4061
4062 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4063 if (!o)
4064 return PARSE_FAIL;
4065
4066 *str = q;
4067 return o->value;
4068 }
4069
4070 /* Parse an option for a barrier instruction. Returns the encoding for the
4071 option, or PARSE_FAIL. */
4072
4073 static int
4074 parse_barrier (char **str)
4075 {
4076 char *p, *q;
4077 const struct aarch64_name_value_pair *o;
4078
4079 p = q = *str;
4080 while (ISALPHA (*q))
4081 q++;
4082
4083 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4084 if (!o)
4085 return PARSE_FAIL;
4086
4087 *str = q;
4088 return o->value;
4089 }
4090
/* Parse an operand for a PSB barrier.  Set *HINT_OPT to the hint-option record
   return 0 if successful.  Otherwise return PARSE_FAIL.  */

static int
parse_barrier_psb (char **str,
		   const struct aarch64_name_value_pair ** hint_opt)
{
  char *p, *q;
  const struct aarch64_name_value_pair *o;

  p = q = *str;
  while (ISALPHA (*q))
    q++;

  o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
  if (!o)
    {
      set_fatal_syntax_error
	( _("unknown or missing option to PSB/TSB"));
      return PARSE_FAIL;
    }

  /* 0x11 is the hint encoding of CSYNC, the only option PSB/TSB take.  */
  if (o->value != 0x11)
    {
      /* PSB only accepts option name 'CSYNC'.  */
      set_syntax_error
	(_("the specified option is not accepted for PSB/TSB"));
      return PARSE_FAIL;
    }

  *str = q;
  *hint_opt = o;
  return 0;
}
4125
/* Parse an operand for BTI.  Set *HINT_OPT to the hint-option record
   return 0 if successful.  Otherwise return PARSE_FAIL.  */

static int
parse_bti_operand (char **str,
		   const struct aarch64_name_value_pair ** hint_opt)
{
  char *p, *q;
  const struct aarch64_name_value_pair *o;

  p = q = *str;
  while (ISALPHA (*q))
    q++;

  o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
  if (!o)
    {
      set_fatal_syntax_error
	( _("unknown option to BTI"));
      return PARSE_FAIL;
    }

  /* The hint table is shared with PSB/TSB etc., so filter for the
     target-indicator options BTI actually accepts.  */
  switch (o->value)
    {
    /* Valid BTI operands.  */
    case HINT_OPD_C:
    case HINT_OPD_J:
    case HINT_OPD_JC:
      break;

    default:
      set_syntax_error
	(_("unknown option to BTI"));
      return PARSE_FAIL;
    }

  *str = q;
  *hint_opt = o;
  return 0;
}
4166
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-null, *FLAGS receives the register's flag bits (or 0
   for an implementation-defined name).
 */

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, truncating at the
     buffer size.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields as op0:op1:CRn:CRm:op2 (2:3:4:4:3 bits).  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* A known name: diagnose (but do not fail) uses not supported by
	 the selected processor, and warn about deprecated names.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4240
/* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
   for the option, or NULL.  */

static const aarch64_sys_ins_reg *
parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_ins_reg *o;

  /* Copy a lower-cased version of the name into BUF, truncating at the
     buffer size.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return NULL;

  o = str_hash_find (sys_ins_regs, buf);
  if (!o)
    return NULL;

  /* Diagnose (but do not fail) names the selected processor lacks, and
     warn about deprecated names.  */
  if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
					o->name, o->value, o->flags, 0))
    as_bad (_("selected processor does not support system register "
	      "name '%s'"), buf);
  if (aarch64_sys_reg_deprecated_p (o->flags))
    as_warn (_("system register name '%s' is deprecated and may be "
	       "removed in a future release"), buf);

  *str = q;
  return o;
}
4278 \f
/* Operand-parsing helper macros used by md_assemble's per-operand loop.
   Each expects local variables STR (the parse cursor) and, where noted,
   VAL/REG/RTYPE/QUALIFIER/INFO/IMM_REG_TYPE, and jumps to the local
   `failure' label on a parse error.  */

/* Consume the literal character CHR or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of REGTYPE into VAL (its number), or fail with the
   default error.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of REG_TYPE, storing its number
   and qualifier into INFO, or fail with the default error.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL without range checking ("nc").  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic enum token (see parse_enum_string) into VAL.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; fail if it yields false/zero.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4330 \f
/* Place IMM in the imm12 field (bits [21:10]) of an add/sub immediate
   instruction.  IMM must already fit in 12 bits.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  return imm << 10;
}
4337
/* Place CNT in the shift-amount field (bits [23:22]) of an add/sub
   immediate instruction.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  return cnt << 22;
}
4344
/* Encode the 21-bit PC-relative immediate of an ADR instruction:
   immlo (bits [1:0] of IMM) goes to bits [30:29], immhi (bits [20:2])
   goes to bits [23:5].  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;
  uint32_t immhi = (imm >> 2) & 0x7ffff;

  return (immlo << 29) | (immhi << 5);
}
4353
/* Place IMM in the imm16 field (bits [20:5]) of a move wide immediate
   instruction.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  return imm << 5;
}
4360
/* Keep only the low 26 bits of OFS, the imm26 field (bits [25:0]) of an
   unconditional branch.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x03ffffffu;
}
4367
/* Place the low 19 bits of OFS in the imm19 field (bits [23:5]) of a
   conditional branch or compare & branch instruction.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffffu) << 5;
}
4374
/* Place the low 19 bits of OFS in the imm19 field (bits [23:5]) of a
   load-literal instruction.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffffu) << 5;
}
4381
/* Place the low 14 bits of OFS in the imm14 field (bits [18:5]) of a
   test & branch instruction.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fffu) << 5;
}
4388
/* Place IMM in the imm16 field (bits [20:5]) of an SVC/HVC/SMC
   exception-generating instruction.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  return imm << 5;
}
4395
/* Toggle bit 30 of OPCODE, turning add(s) into sub(s) or vice versa.
   Applying this twice yields the original opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ (1u << 30);
}
4402
/* Set bit 30 of OPCODE, re-encoding a MOVN/MOVZ opcode as MOVZ.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | (1u << 30);
}
4408
/* Clear bit 30 of OPCODE, re-encoding a MOVN/MOVZ opcode as MOVN.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(1u << 30);
}
4414
/* Overall per-instruction processing.  */

/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.  */

static fixS *
fix_new_aarch64 (fragS * frag,
		 int where,
		 short int size,
		 expressionS * exp,
		 int pc_rel,
		 int reloc)
{
  fixS *new_fix;

  switch (exp->X_op)
    {
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      /* Simple expressions can be fixed up directly.  */
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
      break;

    default:
      /* Anything more complex is wrapped in an expression symbol so
	 the fixup machinery can resolve it later.  */
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
			 pc_rel, reloc);
      break;
    }
  return new_fix;
}
4450 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging; entries must stay in
   the same order as enum aarch64_operand_error_kind.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4472
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enum values being ordered by
     increasing severity; the asserts document and enforce that order.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4493
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.

   Note: returns a pointer to a static buffer, so the result is only
   valid until the next call.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Copy up to the first 31 bytes and assume the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4522
/* Reset *INSTRUCTION to a pristine state ready for the next assembly
   line; BFD_RELOC_UNUSED marks "no relocation recorded yet".  */
static void
reset_aarch64_instruction (aarch64_instruction *instruction)
{
  memset (instruction, '\0', sizeof (aarch64_instruction));
  instruction->reloc.type = BFD_RELOC_UNUSED;
}
4529
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error, tied to the opcode template that was
   being tried when the error was found.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;	/* Template the error applies to.  */
  aarch64_operand_error detail;	/* Kind, index and message of the error.  */
  struct operand_error_record *next;	/* Singly-linked list link.  */
};

typedef struct operand_error_record operand_error_record;

/* Head/tail of the per-line list of operand error records.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines to avoid
   reallocating.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4561
4562 /* Initialize the data structure that stores the operand mismatch
4563 information on assembling one line of the assembly code. */
4564 static void
4565 init_operand_error_report (void)
4566 {
4567 if (operand_error_report.head != NULL)
4568 {
4569 gas_assert (operand_error_report.tail != NULL);
4570 operand_error_report.tail->next = free_opnd_error_record_nodes;
4571 free_opnd_error_record_nodes = operand_error_report.head;
4572 operand_error_report.head = NULL;
4573 operand_error_report.tail = NULL;
4574 return;
4575 }
4576 gas_assert (operand_error_report.tail == NULL);
4577 }
4578
4579 /* Return TRUE if some operand error has been recorded during the
4580 parsing of the current assembly line using the opcode *OPCODE;
4581 otherwise return FALSE. */
4582 static inline bool
4583 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4584 {
4585 operand_error_record *record = operand_error_report.head;
4586 return record && record->opcode == opcode;
4587 }
4588
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* RECORD aliases the list head; since records are inserted at the head
     and there is one record per opcode, the head is the record for OPCODE
     whenever opcode_has_operand_error_p returns true.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record: reuse a node from the free pool if
	 possible, otherwise allocate a fresh one.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Overwrite (or initialize) the detail of the record for OPCODE.  */
  record->detail = new_record->detail;
}
4640
4641 static inline void
4642 record_operand_error_info (const aarch64_opcode *opcode,
4643 aarch64_operand_error *error_info)
4644 {
4645 operand_error_record record;
4646 record.opcode = opcode;
4647 record.detail = *error_info;
4648 add_operand_error_record (&record);
4649 }
4650
4651 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4652 error message *ERROR, for operand IDX (count from 0). */
4653
4654 static void
4655 record_operand_error (const aarch64_opcode *opcode, int idx,
4656 enum aarch64_operand_error_kind kind,
4657 const char* error)
4658 {
4659 aarch64_operand_error info;
4660 memset(&info, 0, sizeof (info));
4661 info.index = idx;
4662 info.kind = kind;
4663 info.error = error;
4664 info.non_fatal = false;
4665 record_operand_error_info (opcode, &info);
4666 }
4667
4668 static void
4669 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4670 enum aarch64_operand_error_kind kind,
4671 const char* error, const int *extra_data)
4672 {
4673 aarch64_operand_error info;
4674 info.index = idx;
4675 info.kind = kind;
4676 info.error = error;
4677 info.data[0] = extra_data[0];
4678 info.data[1] = extra_data[1];
4679 info.data[2] = extra_data[2];
4680 info.non_fatal = false;
4681 record_operand_error_info (opcode, &info);
4682 }
4683
4684 static void
4685 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4686 const char* error, int lower_bound,
4687 int upper_bound)
4688 {
4689 int data[3] = {lower_bound, upper_bound, 0};
4690 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4691 error, data);
4692 }
4693
4694 /* Remove the operand error record for *OPCODE. */
4695 static void ATTRIBUTE_UNUSED
4696 remove_operand_error_record (const aarch64_opcode *opcode)
4697 {
4698 if (opcode_has_operand_error_p (opcode))
4699 {
4700 operand_error_record* record = operand_error_report.head;
4701 gas_assert (record != NULL && operand_error_report.tail != NULL);
4702 operand_error_report.head = record->next;
4703 record->next = free_opnd_error_record_nodes;
4704 free_opnd_error_record_nodes = record;
4705 if (operand_error_report.head == NULL)
4706 {
4707 gas_assert (operand_error_report.tail == record);
4708 operand_error_report.tail = NULL;
4709 }
4710 }
4711 }
4712
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  /* IDX defaults to the first sequence, so something is always returned
     even when no qualifier matches at all.  */
  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  Note QUALIFIERS_LIST itself is advanced in the
     loop header, in lockstep with I.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers of this sequence agree with
	 the instruction's actual qualifiers.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Strict '>' keeps the first of equally good sequences.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
4762
4763 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4764 corresponding operands in *INSTR. */
4765
4766 static inline void
4767 assign_qualifier_sequence (aarch64_inst *instr,
4768 const aarch64_opnd_qualifier_t *qualifiers)
4769 {
4770 int i = 0;
4771 int num_opnds = aarch64_num_of_operands (instr->opcode);
4772 gas_assert (num_opnds);
4773 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4774 instr->operands[i].qualifier = *qualifiers;
4775 }
4776
/* Print operands for the diagnosis purpose.

   BUF must already contain a NUL-terminated prefix (typically the
   mnemonic); each operand's textual form is appended to it with a
   leading space or ", " delimiter.  The caller is responsible for BUF
   being large enough -- TODO confirm callers always pass a 2048-byte
   buffer as output_operand_error_record does.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cpu_variant);

      /* Delimiter.  Only emitted when the operand printed to something,
	 so fully-omitted optional operands leave no stray comma.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);
    }
}
4810
/* Emit an informational (non-error) printf-style message on stderr,
   prefixed with the current file name and, when known, line number.  */

static void
output_info (const char *format, ...)
{
  unsigned int lineno;
  const char *filename;
  va_list ap;

  filename = as_where (&lineno);
  if (filename != NULL)
    {
      if (lineno != 0)
	fprintf (stderr, "%s:%u: ", filename, lineno);
      else
	fprintf (stderr, "%s: ", filename);
    }
  fprintf (stderr, _("Info: "));
  va_start (ap, format);
  vfprintf (stderr, format, ap);
  va_end (ap);
  (void) putc ('\n', stderr);
}
4834
/* Output one operand error record.

   STR is the user's original assembly line, echoed back in every
   diagnostic.  Depending on RECORD->detail.non_fatal the message is
   issued through as_warn or as_bad.  For AARCH64_OPDE_INVALID_VARIANT
   in verbose mode, the line is re-parsed into the global `inst' to
   synthesize "did you mean" suggestions -- so this clobbers `inst'.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX may be -1 (operand unknown); map that to no operand code.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g.  given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to FAIL here -- we are diagnosing an
	     invalid variant -- hence the inverted assertion.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the lower/upper bounds; equal bounds mean
	 exactly one value is permitted.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5011
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error: report it directly (subject to the filter).  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* First pass: find the error kind of the highest severity among the
     records passing the filter.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Second pass: pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5109 \f
/* Write the 32-bit instruction INSN to BUF - always little-endian,
   regardless of the host's byte order.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Emit the least significant byte first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
5120
/* Read a 32-bit instruction back from BUF, interpreting the bytes as
   little-endian irrespective of host byte order.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Accumulate from the most significant byte downwards.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
5130
/* Emit the 4-byte encoding held in the global `inst' into the current
   frag, creating a fixup when a relocation is pending.  NEW_INST, when
   non-NULL, is attached to the fixup so md_apply_fix can re-encode.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room for one instruction in the current frag.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand and flags so they can be
	     resolved without a BFD reloc.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
5164
/* Link together opcodes of the same name.  */

struct templates
{
  const aarch64_opcode *opcode;	/* One candidate template.  */
  struct templates *next;	/* Next template sharing the mnemonic.  */
};

typedef struct templates templates;
5174
5175 static templates *
5176 lookup_mnemonic (const char *start, int len)
5177 {
5178 templates *templ = NULL;
5179
5180 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5181 return templ;
5182 }
5183
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.

   On success *STR is advanced past the mnemonic (and any ".cond"
   suffix) and the global inst.cond is set; on failure 0/NULL is
   returned and *STR may be left pointing at the offending '.'.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  Remember the first '.' -- it may introduce
     a condition suffix.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Empty mnemonic, or one that starts with '.', is not valid.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown condition name: leave *STR at the dot so the caller
	     can produce a precise diagnostic.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional; 13 + 2 fits condname
	 (str_hash_find_n is length-bounded, so no NUL is needed).  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5247
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.

   Returns AARCH64_OPND_QLF_NIL (after recording a syntax error) when the
   arrangement is invalid.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the final qualifier is found
     at a small offset from this base (see the shift computation below).  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
  };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate suffixes /z and /m.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register: S_B..S_Q are laid out contiguously in
	 the same order as NT_b..NT_q, so simple enum arithmetic works.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5322
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.

   The default value comes from the opcode table; which member of *OPERAND
   receives it depends on the operand class (register number, register
   lane, immediate, barrier/hint option...).  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element operands: the default is the lane register number.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate-like operands: the default is the immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      /* An omitted pattern defaults to "<pattern>, MUL #1".  */
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* The default is an index into the barrier option table.  */
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      /* The default is an index into the hint option table.  */
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5421
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   Validates the user-specified MOVZ/MOVN/MOVK relocation against the
   instruction form (signed relocs are rejected for MOVK; G2/G3 groups
   are rejected for 32-bit destinations) and derives the implicit shift
   amount (0/16/32/48) for operand 1 from the relocation group.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not be combined with a signed or PREL group relocation --
     those set/overwrite all 16 bits, which contradicts MOVK's keep
     semantics.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Map the relocation group (G0..G3) to the implicit LSL amount.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* G2/G3 address bits 32-63; impossible for a W register.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
5523
/* A primitive log calculator: return log2 of SIZE for power-of-two
   SIZE in [1, 16]; assert (and return (unsigned) -1) otherwise.

   Fix: SIZE == 0 previously slipped past the `size > 16' guard and
   indexed ls[-1] -- an out-of-bounds read (undefined behavior) before
   any assertion could fire.  Reject 0 explicitly.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* (unsigned char) -1, i.e. 0xff, marks non-power-of-two sizes.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5539
5540 /* Determine and return the real reloc type code for an instruction
5541 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
5542
5543 static inline bfd_reloc_code_real_type
5544 ldst_lo12_determine_real_reloc_type (void)
5545 {
5546 unsigned logsz, max_logsz;
5547 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
5548 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
5549
5550 const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
5551 {
5552 BFD_RELOC_AARCH64_LDST8_LO12,
5553 BFD_RELOC_AARCH64_LDST16_LO12,
5554 BFD_RELOC_AARCH64_LDST32_LO12,
5555 BFD_RELOC_AARCH64_LDST64_LO12,
5556 BFD_RELOC_AARCH64_LDST128_LO12
5557 },
5558 {
5559 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
5560 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
5561 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
5562 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
5563 BFD_RELOC_AARCH64_NONE
5564 },
5565 {
5566 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
5567 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
5568 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
5569 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
5570 BFD_RELOC_AARCH64_NONE
5571 },
5572 {
5573 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
5574 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
5575 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
5576 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
5577 BFD_RELOC_AARCH64_NONE
5578 },
5579 {
5580 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
5581 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
5582 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
5583 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
5584 BFD_RELOC_AARCH64_NONE
5585 }
5586 };
5587
5588 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5589 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5590 || (inst.reloc.type
5591 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
5592 || (inst.reloc.type
5593 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
5594 || (inst.reloc.type
5595 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
5596 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
5597
5598 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
5599 opd1_qlf =
5600 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
5601 1, opd0_qlf, 0);
5602 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
5603
5604 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
5605
5606 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5607 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
5608 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
5609 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
5610 max_logsz = 3;
5611 else
5612 max_logsz = 4;
5613
5614 if (logsz > max_logsz)
5615 {
5616 /* SEE PR 27904 for an example of this. */
5617 set_fatal_syntax_error
5618 (_("relocation qualifier does not match instruction size"));
5619 return BFD_RELOC_AARCH64_NONE;
5620 }
5621
5622 /* In reloc.c, these pseudo relocation types should be defined in similar
5623 order as above reloc_ldst_lo12 array. Because the array index calculation
5624 below relies on this. */
5625 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
5626 }
5627
/* Check whether the encoded register list REGINFO is valid.  The low two
   bits hold the register count minus one; each subsequent 5-bit field
   holds a register number.  The numbers must increase (modulo 32) by one,
   or by two when ACCEPT_ALTERNATE is non-zero.

   Return FALSE if such a register list is invalid, otherwise return TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t n, count, step, expected;

  count = (reginfo & 0x3) + 1;
  step = accept_alternate ? 2 : 1;
  expected = (reginfo >> 2) & 0x1f;

  for (n = 1; n < count; n++)
    {
      expected = (expected + step) & 0x1f;
      if (((reginfo >> (2 + 5 * n)) & 0x1f) != expected)
	return false;
    }

  return true;
}
5658
5659 /* Generic instruction operand parser. This does no encoding and no
5660 semantic validation; it merely squirrels values away in the inst
5661 structure. Returns TRUE or FALSE depending on whether the
5662 specified grammar matched. */
5663
5664 static bool
5665 parse_operands (char *str, const aarch64_opcode *opcode)
5666 {
5667 int i;
5668 char *backtrack_pos = 0;
5669 const enum aarch64_opnd *operands = opcode->operands;
5670 aarch64_reg_type imm_reg_type;
5671
5672 clear_error ();
5673 skip_whitespace (str);
5674
5675 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5676 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5677 else
5678 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5679
5680 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5681 {
5682 int64_t val;
5683 const reg_entry *reg;
5684 int comma_skipped_p = 0;
5685 aarch64_reg_type rtype;
5686 struct vector_type_el vectype;
5687 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5688 aarch64_opnd_info *info = &inst.base.operands[i];
5689 aarch64_reg_type reg_type;
5690
5691 DEBUG_TRACE ("parse operand %d", i);
5692
5693 /* Assign the operand code. */
5694 info->type = operands[i];
5695
5696 if (optional_operand_p (opcode, i))
5697 {
5698 /* Remember where we are in case we need to backtrack. */
5699 gas_assert (!backtrack_pos);
5700 backtrack_pos = str;
5701 }
5702
5703 /* Expect comma between operands; the backtrack mechanism will take
5704 care of cases of omitted optional operand. */
5705 if (i > 0 && ! skip_past_char (&str, ','))
5706 {
5707 set_syntax_error (_("comma expected between operands"));
5708 goto failure;
5709 }
5710 else
5711 comma_skipped_p = 1;
5712
5713 switch (operands[i])
5714 {
5715 case AARCH64_OPND_Rd:
5716 case AARCH64_OPND_Rn:
5717 case AARCH64_OPND_Rm:
5718 case AARCH64_OPND_Rt:
5719 case AARCH64_OPND_Rt2:
5720 case AARCH64_OPND_Rs:
5721 case AARCH64_OPND_Ra:
5722 case AARCH64_OPND_Rt_LS64:
5723 case AARCH64_OPND_Rt_SYS:
5724 case AARCH64_OPND_PAIRREG:
5725 case AARCH64_OPND_SVE_Rm:
5726 po_int_reg_or_fail (REG_TYPE_R_Z);
5727
5728 /* In LS64 load/store instructions Rt register number must be even
5729 and <=22. */
5730 if (operands[i] == AARCH64_OPND_Rt_LS64)
5731 {
5732 /* We've already checked if this is valid register.
5733 This will check if register number (Rt) is not undefined for LS64
5734 instructions:
5735 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
5736 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
5737 {
5738 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
5739 goto failure;
5740 }
5741 }
5742 break;
5743
5744 case AARCH64_OPND_Rd_SP:
5745 case AARCH64_OPND_Rn_SP:
5746 case AARCH64_OPND_Rt_SP:
5747 case AARCH64_OPND_SVE_Rn_SP:
5748 case AARCH64_OPND_Rm_SP:
5749 po_int_reg_or_fail (REG_TYPE_R_SP);
5750 break;
5751
5752 case AARCH64_OPND_Rm_EXT:
5753 case AARCH64_OPND_Rm_SFT:
5754 po_misc_or_fail (parse_shifter_operand
5755 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5756 ? SHIFTED_ARITH_IMM
5757 : SHIFTED_LOGIC_IMM)));
5758 if (!info->shifter.operator_present)
5759 {
5760 /* Default to LSL if not present. Libopcodes prefers shifter
5761 kind to be explicit. */
5762 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5763 info->shifter.kind = AARCH64_MOD_LSL;
5764 /* For Rm_EXT, libopcodes will carry out further check on whether
5765 or not stack pointer is used in the instruction (Recall that
5766 "the extend operator is not optional unless at least one of
5767 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5768 }
5769 break;
5770
5771 case AARCH64_OPND_Fd:
5772 case AARCH64_OPND_Fn:
5773 case AARCH64_OPND_Fm:
5774 case AARCH64_OPND_Fa:
5775 case AARCH64_OPND_Ft:
5776 case AARCH64_OPND_Ft2:
5777 case AARCH64_OPND_Sd:
5778 case AARCH64_OPND_Sn:
5779 case AARCH64_OPND_Sm:
5780 case AARCH64_OPND_SVE_VZn:
5781 case AARCH64_OPND_SVE_Vd:
5782 case AARCH64_OPND_SVE_Vm:
5783 case AARCH64_OPND_SVE_Vn:
5784 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5785 if (val == PARSE_FAIL)
5786 {
5787 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5788 goto failure;
5789 }
5790 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5791
5792 info->reg.regno = val;
5793 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5794 break;
5795
5796 case AARCH64_OPND_SVE_Pd:
5797 case AARCH64_OPND_SVE_Pg3:
5798 case AARCH64_OPND_SVE_Pg4_5:
5799 case AARCH64_OPND_SVE_Pg4_10:
5800 case AARCH64_OPND_SVE_Pg4_16:
5801 case AARCH64_OPND_SVE_Pm:
5802 case AARCH64_OPND_SVE_Pn:
5803 case AARCH64_OPND_SVE_Pt:
5804 reg_type = REG_TYPE_PN;
5805 goto vector_reg;
5806
5807 case AARCH64_OPND_SVE_Za_5:
5808 case AARCH64_OPND_SVE_Za_16:
5809 case AARCH64_OPND_SVE_Zd:
5810 case AARCH64_OPND_SVE_Zm_5:
5811 case AARCH64_OPND_SVE_Zm_16:
5812 case AARCH64_OPND_SVE_Zn:
5813 case AARCH64_OPND_SVE_Zt:
5814 reg_type = REG_TYPE_ZN;
5815 goto vector_reg;
5816
5817 case AARCH64_OPND_Va:
5818 case AARCH64_OPND_Vd:
5819 case AARCH64_OPND_Vn:
5820 case AARCH64_OPND_Vm:
5821 reg_type = REG_TYPE_VN;
5822 vector_reg:
5823 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5824 if (val == PARSE_FAIL)
5825 {
5826 first_error (_(get_reg_expected_msg (reg_type)));
5827 goto failure;
5828 }
5829 if (vectype.defined & NTA_HASINDEX)
5830 goto failure;
5831
5832 info->reg.regno = val;
5833 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5834 && vectype.type == NT_invtype)
5835 /* Unqualified Pn and Zn registers are allowed in certain
5836 contexts. Rely on F_STRICT qualifier checking to catch
5837 invalid uses. */
5838 info->qualifier = AARCH64_OPND_QLF_NIL;
5839 else
5840 {
5841 info->qualifier = vectype_to_qualifier (&vectype);
5842 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5843 goto failure;
5844 }
5845 break;
5846
5847 case AARCH64_OPND_VdD1:
5848 case AARCH64_OPND_VnD1:
5849 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5850 if (val == PARSE_FAIL)
5851 {
5852 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5853 goto failure;
5854 }
5855 if (vectype.type != NT_d || vectype.index != 1)
5856 {
5857 set_fatal_syntax_error
5858 (_("the top half of a 128-bit FP/SIMD register is expected"));
5859 goto failure;
5860 }
5861 info->reg.regno = val;
5862 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5863 here; it is correct for the purpose of encoding/decoding since
5864 only the register number is explicitly encoded in the related
5865 instructions, although this appears a bit hacky. */
5866 info->qualifier = AARCH64_OPND_QLF_S_D;
5867 break;
5868
5869 case AARCH64_OPND_SVE_Zm3_INDEX:
5870 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5871 case AARCH64_OPND_SVE_Zm3_11_INDEX:
5872 case AARCH64_OPND_SVE_Zm4_11_INDEX:
5873 case AARCH64_OPND_SVE_Zm4_INDEX:
5874 case AARCH64_OPND_SVE_Zn_INDEX:
5875 reg_type = REG_TYPE_ZN;
5876 goto vector_reg_index;
5877
5878 case AARCH64_OPND_Ed:
5879 case AARCH64_OPND_En:
5880 case AARCH64_OPND_Em:
5881 case AARCH64_OPND_Em16:
5882 case AARCH64_OPND_SM3_IMM2:
5883 reg_type = REG_TYPE_VN;
5884 vector_reg_index:
5885 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5886 if (val == PARSE_FAIL)
5887 {
5888 first_error (_(get_reg_expected_msg (reg_type)));
5889 goto failure;
5890 }
5891 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5892 goto failure;
5893
5894 info->reglane.regno = val;
5895 info->reglane.index = vectype.index;
5896 info->qualifier = vectype_to_qualifier (&vectype);
5897 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5898 goto failure;
5899 break;
5900
5901 case AARCH64_OPND_SVE_ZnxN:
5902 case AARCH64_OPND_SVE_ZtxN:
5903 reg_type = REG_TYPE_ZN;
5904 goto vector_reg_list;
5905
5906 case AARCH64_OPND_LVn:
5907 case AARCH64_OPND_LVt:
5908 case AARCH64_OPND_LVt_AL:
5909 case AARCH64_OPND_LEt:
5910 reg_type = REG_TYPE_VN;
5911 vector_reg_list:
5912 if (reg_type == REG_TYPE_ZN
5913 && get_opcode_dependent_value (opcode) == 1
5914 && *str != '{')
5915 {
5916 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5917 if (val == PARSE_FAIL)
5918 {
5919 first_error (_(get_reg_expected_msg (reg_type)));
5920 goto failure;
5921 }
5922 info->reglist.first_regno = val;
5923 info->reglist.num_regs = 1;
5924 }
5925 else
5926 {
5927 val = parse_vector_reg_list (&str, reg_type, &vectype);
5928 if (val == PARSE_FAIL)
5929 goto failure;
5930
5931 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5932 {
5933 set_fatal_syntax_error (_("invalid register list"));
5934 goto failure;
5935 }
5936
5937 if (vectype.width != 0 && *str != ',')
5938 {
5939 set_fatal_syntax_error
5940 (_("expected element type rather than vector type"));
5941 goto failure;
5942 }
5943
5944 info->reglist.first_regno = (val >> 2) & 0x1f;
5945 info->reglist.num_regs = (val & 0x3) + 1;
5946 }
5947 if (operands[i] == AARCH64_OPND_LEt)
5948 {
5949 if (!(vectype.defined & NTA_HASINDEX))
5950 goto failure;
5951 info->reglist.has_index = 1;
5952 info->reglist.index = vectype.index;
5953 }
5954 else
5955 {
5956 if (vectype.defined & NTA_HASINDEX)
5957 goto failure;
5958 if (!(vectype.defined & NTA_HASTYPE))
5959 {
5960 if (reg_type == REG_TYPE_ZN)
5961 set_fatal_syntax_error (_("missing type suffix"));
5962 goto failure;
5963 }
5964 }
5965 info->qualifier = vectype_to_qualifier (&vectype);
5966 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5967 goto failure;
5968 break;
5969
5970 case AARCH64_OPND_CRn:
5971 case AARCH64_OPND_CRm:
5972 {
5973 char prefix = *(str++);
5974 if (prefix != 'c' && prefix != 'C')
5975 goto failure;
5976
5977 po_imm_nc_or_fail ();
5978 if (val > 15)
5979 {
5980 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5981 goto failure;
5982 }
5983 info->qualifier = AARCH64_OPND_QLF_CR;
5984 info->imm.value = val;
5985 break;
5986 }
5987
5988 case AARCH64_OPND_SHLL_IMM:
5989 case AARCH64_OPND_IMM_VLSR:
5990 po_imm_or_fail (1, 64);
5991 info->imm.value = val;
5992 break;
5993
5994 case AARCH64_OPND_CCMP_IMM:
5995 case AARCH64_OPND_SIMM5:
5996 case AARCH64_OPND_FBITS:
5997 case AARCH64_OPND_TME_UIMM16:
5998 case AARCH64_OPND_UIMM4:
5999 case AARCH64_OPND_UIMM4_ADDG:
6000 case AARCH64_OPND_UIMM10:
6001 case AARCH64_OPND_UIMM3_OP1:
6002 case AARCH64_OPND_UIMM3_OP2:
6003 case AARCH64_OPND_IMM_VLSL:
6004 case AARCH64_OPND_IMM:
6005 case AARCH64_OPND_IMM_2:
6006 case AARCH64_OPND_WIDTH:
6007 case AARCH64_OPND_SVE_INV_LIMM:
6008 case AARCH64_OPND_SVE_LIMM:
6009 case AARCH64_OPND_SVE_LIMM_MOV:
6010 case AARCH64_OPND_SVE_SHLIMM_PRED:
6011 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6012 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6013 case AARCH64_OPND_SVE_SHRIMM_PRED:
6014 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6015 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6016 case AARCH64_OPND_SVE_SIMM5:
6017 case AARCH64_OPND_SVE_SIMM5B:
6018 case AARCH64_OPND_SVE_SIMM6:
6019 case AARCH64_OPND_SVE_SIMM8:
6020 case AARCH64_OPND_SVE_UIMM3:
6021 case AARCH64_OPND_SVE_UIMM7:
6022 case AARCH64_OPND_SVE_UIMM8:
6023 case AARCH64_OPND_SVE_UIMM8_53:
6024 case AARCH64_OPND_IMM_ROT1:
6025 case AARCH64_OPND_IMM_ROT2:
6026 case AARCH64_OPND_IMM_ROT3:
6027 case AARCH64_OPND_SVE_IMM_ROT1:
6028 case AARCH64_OPND_SVE_IMM_ROT2:
6029 case AARCH64_OPND_SVE_IMM_ROT3:
6030 po_imm_nc_or_fail ();
6031 info->imm.value = val;
6032 break;
6033
6034 case AARCH64_OPND_SVE_AIMM:
6035 case AARCH64_OPND_SVE_ASIMM:
6036 po_imm_nc_or_fail ();
6037 info->imm.value = val;
6038 skip_whitespace (str);
6039 if (skip_past_comma (&str))
6040 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6041 else
6042 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6043 break;
6044
6045 case AARCH64_OPND_SVE_PATTERN:
6046 po_enum_or_fail (aarch64_sve_pattern_array);
6047 info->imm.value = val;
6048 break;
6049
6050 case AARCH64_OPND_SVE_PATTERN_SCALED:
6051 po_enum_or_fail (aarch64_sve_pattern_array);
6052 info->imm.value = val;
6053 if (skip_past_comma (&str)
6054 && !parse_shift (&str, info, SHIFTED_MUL))
6055 goto failure;
6056 if (!info->shifter.operator_present)
6057 {
6058 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6059 info->shifter.kind = AARCH64_MOD_MUL;
6060 info->shifter.amount = 1;
6061 }
6062 break;
6063
6064 case AARCH64_OPND_SVE_PRFOP:
6065 po_enum_or_fail (aarch64_sve_prfop_array);
6066 info->imm.value = val;
6067 break;
6068
6069 case AARCH64_OPND_UIMM7:
6070 po_imm_or_fail (0, 127);
6071 info->imm.value = val;
6072 break;
6073
6074 case AARCH64_OPND_IDX:
6075 case AARCH64_OPND_MASK:
6076 case AARCH64_OPND_BIT_NUM:
6077 case AARCH64_OPND_IMMR:
6078 case AARCH64_OPND_IMMS:
6079 po_imm_or_fail (0, 63);
6080 info->imm.value = val;
6081 break;
6082
6083 case AARCH64_OPND_IMM0:
6084 po_imm_nc_or_fail ();
6085 if (val != 0)
6086 {
6087 set_fatal_syntax_error (_("immediate zero expected"));
6088 goto failure;
6089 }
6090 info->imm.value = 0;
6091 break;
6092
6093 case AARCH64_OPND_FPIMM0:
6094 {
6095 int qfloat;
6096 bool res1 = false, res2 = false;
6097 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6098 it is probably not worth the effort to support it. */
6099 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6100 imm_reg_type))
6101 && (error_p ()
6102 || !(res2 = parse_constant_immediate (&str, &val,
6103 imm_reg_type))))
6104 goto failure;
6105 if ((res1 && qfloat == 0) || (res2 && val == 0))
6106 {
6107 info->imm.value = 0;
6108 info->imm.is_fp = 1;
6109 break;
6110 }
6111 set_fatal_syntax_error (_("immediate zero expected"));
6112 goto failure;
6113 }
6114
6115 case AARCH64_OPND_IMM_MOV:
6116 {
6117 char *saved = str;
6118 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6119 reg_name_p (str, REG_TYPE_VN))
6120 goto failure;
6121 str = saved;
6122 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6123 GE_OPT_PREFIX, REJECT_ABSENT,
6124 NORMAL_RESOLUTION));
6125 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6126 later. fix_mov_imm_insn will try to determine a machine
6127 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6128 message if the immediate cannot be moved by a single
6129 instruction. */
6130 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6131 inst.base.operands[i].skip = 1;
6132 }
6133 break;
6134
6135 case AARCH64_OPND_SIMD_IMM:
6136 case AARCH64_OPND_SIMD_IMM_SFT:
6137 if (! parse_big_immediate (&str, &val, imm_reg_type))
6138 goto failure;
6139 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6140 /* addr_off_p */ 0,
6141 /* need_libopcodes_p */ 1,
6142 /* skip_p */ 1);
6143 /* Parse shift.
6144 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6145 shift, we don't check it here; we leave the checking to
6146 the libopcodes (operand_general_constraint_met_p). By
6147 doing this, we achieve better diagnostics. */
6148 if (skip_past_comma (&str)
6149 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6150 goto failure;
6151 if (!info->shifter.operator_present
6152 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6153 {
6154 /* Default to LSL if not present. Libopcodes prefers shifter
6155 kind to be explicit. */
6156 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6157 info->shifter.kind = AARCH64_MOD_LSL;
6158 }
6159 break;
6160
6161 case AARCH64_OPND_FPIMM:
6162 case AARCH64_OPND_SIMD_FPIMM:
6163 case AARCH64_OPND_SVE_FPIMM8:
6164 {
6165 int qfloat;
6166 bool dp_p;
6167
6168 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6169 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6170 || !aarch64_imm_float_p (qfloat))
6171 {
6172 if (!error_p ())
6173 set_fatal_syntax_error (_("invalid floating-point"
6174 " constant"));
6175 goto failure;
6176 }
6177 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6178 inst.base.operands[i].imm.is_fp = 1;
6179 }
6180 break;
6181
6182 case AARCH64_OPND_SVE_I1_HALF_ONE:
6183 case AARCH64_OPND_SVE_I1_HALF_TWO:
6184 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6185 {
6186 int qfloat;
6187 bool dp_p;
6188
6189 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6190 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6191 {
6192 if (!error_p ())
6193 set_fatal_syntax_error (_("invalid floating-point"
6194 " constant"));
6195 goto failure;
6196 }
6197 inst.base.operands[i].imm.value = qfloat;
6198 inst.base.operands[i].imm.is_fp = 1;
6199 }
6200 break;
6201
6202 case AARCH64_OPND_LIMM:
6203 po_misc_or_fail (parse_shifter_operand (&str, info,
6204 SHIFTED_LOGIC_IMM));
6205 if (info->shifter.operator_present)
6206 {
6207 set_fatal_syntax_error
6208 (_("shift not allowed for bitmask immediate"));
6209 goto failure;
6210 }
6211 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6212 /* addr_off_p */ 0,
6213 /* need_libopcodes_p */ 1,
6214 /* skip_p */ 1);
6215 break;
6216
6217 case AARCH64_OPND_AIMM:
6218 if (opcode->op == OP_ADD)
6219 /* ADD may have relocation types. */
6220 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6221 SHIFTED_ARITH_IMM));
6222 else
6223 po_misc_or_fail (parse_shifter_operand (&str, info,
6224 SHIFTED_ARITH_IMM));
6225 switch (inst.reloc.type)
6226 {
6227 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6228 info->shifter.amount = 12;
6229 break;
6230 case BFD_RELOC_UNUSED:
6231 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6232 if (info->shifter.kind != AARCH64_MOD_NONE)
6233 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6234 inst.reloc.pc_rel = 0;
6235 break;
6236 default:
6237 break;
6238 }
6239 info->imm.value = 0;
6240 if (!info->shifter.operator_present)
6241 {
6242 /* Default to LSL if not present. Libopcodes prefers shifter
6243 kind to be explicit. */
6244 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6245 info->shifter.kind = AARCH64_MOD_LSL;
6246 }
6247 break;
6248
6249 case AARCH64_OPND_HALF:
6250 {
6251 /* #<imm16> or relocation. */
6252 int internal_fixup_p;
6253 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6254 if (internal_fixup_p)
6255 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6256 skip_whitespace (str);
6257 if (skip_past_comma (&str))
6258 {
6259 /* {, LSL #<shift>} */
6260 if (! aarch64_gas_internal_fixup_p ())
6261 {
6262 set_fatal_syntax_error (_("can't mix relocation modifier "
6263 "with explicit shift"));
6264 goto failure;
6265 }
6266 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6267 }
6268 else
6269 inst.base.operands[i].shifter.amount = 0;
6270 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6271 inst.base.operands[i].imm.value = 0;
6272 if (! process_movw_reloc_info ())
6273 goto failure;
6274 }
6275 break;
6276
6277 case AARCH64_OPND_EXCEPTION:
6278 case AARCH64_OPND_UNDEFINED:
6279 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6280 imm_reg_type));
6281 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6282 /* addr_off_p */ 0,
6283 /* need_libopcodes_p */ 0,
6284 /* skip_p */ 1);
6285 break;
6286
6287 case AARCH64_OPND_NZCV:
6288 {
6289 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6290 if (nzcv != NULL)
6291 {
6292 str += 4;
6293 info->imm.value = nzcv->value;
6294 break;
6295 }
6296 po_imm_or_fail (0, 15);
6297 info->imm.value = val;
6298 }
6299 break;
6300
6301 case AARCH64_OPND_COND:
6302 case AARCH64_OPND_COND1:
6303 {
6304 char *start = str;
6305 do
6306 str++;
6307 while (ISALPHA (*str));
6308 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6309 if (info->cond == NULL)
6310 {
6311 set_syntax_error (_("invalid condition"));
6312 goto failure;
6313 }
6314 else if (operands[i] == AARCH64_OPND_COND1
6315 && (info->cond->value & 0xe) == 0xe)
6316 {
6317 /* Do not allow AL or NV. */
6318 set_default_error ();
6319 goto failure;
6320 }
6321 }
6322 break;
6323
6324 case AARCH64_OPND_ADDR_ADRP:
6325 po_misc_or_fail (parse_adrp (&str));
6326 /* Clear the value as operand needs to be relocated. */
6327 info->imm.value = 0;
6328 break;
6329
6330 case AARCH64_OPND_ADDR_PCREL14:
6331 case AARCH64_OPND_ADDR_PCREL19:
6332 case AARCH64_OPND_ADDR_PCREL21:
6333 case AARCH64_OPND_ADDR_PCREL26:
6334 po_misc_or_fail (parse_address (&str, info));
6335 if (!info->addr.pcrel)
6336 {
6337 set_syntax_error (_("invalid pc-relative address"));
6338 goto failure;
6339 }
6340 if (inst.gen_lit_pool
6341 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6342 {
6343 /* Only permit "=value" in the literal load instructions.
6344 The literal will be generated by programmer_friendly_fixup. */
6345 set_syntax_error (_("invalid use of \"=immediate\""));
6346 goto failure;
6347 }
6348 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6349 {
6350 set_syntax_error (_("unrecognized relocation suffix"));
6351 goto failure;
6352 }
6353 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6354 {
6355 info->imm.value = inst.reloc.exp.X_add_number;
6356 inst.reloc.type = BFD_RELOC_UNUSED;
6357 }
6358 else
6359 {
6360 info->imm.value = 0;
6361 if (inst.reloc.type == BFD_RELOC_UNUSED)
6362 switch (opcode->iclass)
6363 {
6364 case compbranch:
6365 case condbranch:
6366 /* e.g. CBZ or B.COND */
6367 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6368 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6369 break;
6370 case testbranch:
6371 /* e.g. TBZ */
6372 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6373 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6374 break;
6375 case branch_imm:
6376 /* e.g. B or BL */
6377 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6378 inst.reloc.type =
6379 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6380 : BFD_RELOC_AARCH64_JUMP26;
6381 break;
6382 case loadlit:
6383 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6384 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6385 break;
6386 case pcreladdr:
6387 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6388 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6389 break;
6390 default:
6391 gas_assert (0);
6392 abort ();
6393 }
6394 inst.reloc.pc_rel = 1;
6395 }
6396 break;
6397
6398 case AARCH64_OPND_ADDR_SIMPLE:
6399 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6400 {
6401 /* [<Xn|SP>{, #<simm>}] */
6402 char *start = str;
6403 /* First use the normal address-parsing routines, to get
6404 the usual syntax errors. */
6405 po_misc_or_fail (parse_address (&str, info));
6406 if (info->addr.pcrel || info->addr.offset.is_reg
6407 || !info->addr.preind || info->addr.postind
6408 || info->addr.writeback)
6409 {
6410 set_syntax_error (_("invalid addressing mode"));
6411 goto failure;
6412 }
6413
6414 /* Then retry, matching the specific syntax of these addresses. */
6415 str = start;
6416 po_char_or_fail ('[');
6417 po_reg_or_fail (REG_TYPE_R64_SP);
6418 /* Accept optional ", #0". */
6419 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6420 && skip_past_char (&str, ','))
6421 {
6422 skip_past_char (&str, '#');
6423 if (! skip_past_char (&str, '0'))
6424 {
6425 set_fatal_syntax_error
6426 (_("the optional immediate offset can only be 0"));
6427 goto failure;
6428 }
6429 }
6430 po_char_or_fail (']');
6431 break;
6432 }
6433
6434 case AARCH64_OPND_ADDR_REGOFF:
6435 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6436 po_misc_or_fail (parse_address (&str, info));
6437 regoff_addr:
6438 if (info->addr.pcrel || !info->addr.offset.is_reg
6439 || !info->addr.preind || info->addr.postind
6440 || info->addr.writeback)
6441 {
6442 set_syntax_error (_("invalid addressing mode"));
6443 goto failure;
6444 }
6445 if (!info->shifter.operator_present)
6446 {
6447 /* Default to LSL if not present. Libopcodes prefers shifter
6448 kind to be explicit. */
6449 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6450 info->shifter.kind = AARCH64_MOD_LSL;
6451 }
6452 /* Qualifier to be deduced by libopcodes. */
6453 break;
6454
6455 case AARCH64_OPND_ADDR_SIMM7:
6456 po_misc_or_fail (parse_address (&str, info));
6457 if (info->addr.pcrel || info->addr.offset.is_reg
6458 || (!info->addr.preind && !info->addr.postind))
6459 {
6460 set_syntax_error (_("invalid addressing mode"));
6461 goto failure;
6462 }
6463 if (inst.reloc.type != BFD_RELOC_UNUSED)
6464 {
6465 set_syntax_error (_("relocation not allowed"));
6466 goto failure;
6467 }
6468 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6469 /* addr_off_p */ 1,
6470 /* need_libopcodes_p */ 1,
6471 /* skip_p */ 0);
6472 break;
6473
6474 case AARCH64_OPND_ADDR_SIMM9:
6475 case AARCH64_OPND_ADDR_SIMM9_2:
6476 case AARCH64_OPND_ADDR_SIMM11:
6477 case AARCH64_OPND_ADDR_SIMM13:
6478 po_misc_or_fail (parse_address (&str, info));
6479 if (info->addr.pcrel || info->addr.offset.is_reg
6480 || (!info->addr.preind && !info->addr.postind)
6481 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6482 && info->addr.writeback))
6483 {
6484 set_syntax_error (_("invalid addressing mode"));
6485 goto failure;
6486 }
6487 if (inst.reloc.type != BFD_RELOC_UNUSED)
6488 {
6489 set_syntax_error (_("relocation not allowed"));
6490 goto failure;
6491 }
6492 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6493 /* addr_off_p */ 1,
6494 /* need_libopcodes_p */ 1,
6495 /* skip_p */ 0);
6496 break;
6497
6498 case AARCH64_OPND_ADDR_SIMM10:
6499 case AARCH64_OPND_ADDR_OFFSET:
6500 po_misc_or_fail (parse_address (&str, info));
6501 if (info->addr.pcrel || info->addr.offset.is_reg
6502 || !info->addr.preind || info->addr.postind)
6503 {
6504 set_syntax_error (_("invalid addressing mode"));
6505 goto failure;
6506 }
6507 if (inst.reloc.type != BFD_RELOC_UNUSED)
6508 {
6509 set_syntax_error (_("relocation not allowed"));
6510 goto failure;
6511 }
6512 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6513 /* addr_off_p */ 1,
6514 /* need_libopcodes_p */ 1,
6515 /* skip_p */ 0);
6516 break;
6517
6518 case AARCH64_OPND_ADDR_UIMM12:
6519 po_misc_or_fail (parse_address (&str, info));
6520 if (info->addr.pcrel || info->addr.offset.is_reg
6521 || !info->addr.preind || info->addr.writeback)
6522 {
6523 set_syntax_error (_("invalid addressing mode"));
6524 goto failure;
6525 }
6526 if (inst.reloc.type == BFD_RELOC_UNUSED)
6527 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6528 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6529 || (inst.reloc.type
6530 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6531 || (inst.reloc.type
6532 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6533 || (inst.reloc.type
6534 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6535 || (inst.reloc.type
6536 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6537 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6538 /* Leave qualifier to be determined by libopcodes. */
6539 break;
6540
6541 case AARCH64_OPND_SIMD_ADDR_POST:
6542 /* [<Xn|SP>], <Xm|#<amount>> */
6543 po_misc_or_fail (parse_address (&str, info));
6544 if (!info->addr.postind || !info->addr.writeback)
6545 {
6546 set_syntax_error (_("invalid addressing mode"));
6547 goto failure;
6548 }
6549 if (!info->addr.offset.is_reg)
6550 {
6551 if (inst.reloc.exp.X_op == O_constant)
6552 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6553 else
6554 {
6555 set_fatal_syntax_error
6556 (_("writeback value must be an immediate constant"));
6557 goto failure;
6558 }
6559 }
6560 /* No qualifier. */
6561 break;
6562
6563 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6564 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
6565 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6566 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6567 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6568 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6569 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6570 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6571 case AARCH64_OPND_SVE_ADDR_RI_U6:
6572 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6573 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6574 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6575 /* [X<n>{, #imm, MUL VL}]
6576 [X<n>{, #imm}]
6577 but recognizing SVE registers. */
6578 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6579 &offset_qualifier));
6580 if (base_qualifier != AARCH64_OPND_QLF_X)
6581 {
6582 set_syntax_error (_("invalid addressing mode"));
6583 goto failure;
6584 }
6585 sve_regimm:
6586 if (info->addr.pcrel || info->addr.offset.is_reg
6587 || !info->addr.preind || info->addr.writeback)
6588 {
6589 set_syntax_error (_("invalid addressing mode"));
6590 goto failure;
6591 }
6592 if (inst.reloc.type != BFD_RELOC_UNUSED
6593 || inst.reloc.exp.X_op != O_constant)
6594 {
6595 /* Make sure this has priority over
6596 "invalid addressing mode". */
6597 set_fatal_syntax_error (_("constant offset required"));
6598 goto failure;
6599 }
6600 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6601 break;
6602
6603 case AARCH64_OPND_SVE_ADDR_R:
6604 /* [<Xn|SP>{, <R><m>}]
6605 but recognizing SVE registers. */
6606 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6607 &offset_qualifier));
6608 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6609 {
6610 offset_qualifier = AARCH64_OPND_QLF_X;
6611 info->addr.offset.is_reg = 1;
6612 info->addr.offset.regno = 31;
6613 }
6614 else if (base_qualifier != AARCH64_OPND_QLF_X
6615 || offset_qualifier != AARCH64_OPND_QLF_X)
6616 {
6617 set_syntax_error (_("invalid addressing mode"));
6618 goto failure;
6619 }
6620 goto regoff_addr;
6621
6622 case AARCH64_OPND_SVE_ADDR_RR:
6623 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6624 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6625 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6626 case AARCH64_OPND_SVE_ADDR_RX:
6627 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6628 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6629 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6630 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6631 but recognizing SVE registers. */
6632 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6633 &offset_qualifier));
6634 if (base_qualifier != AARCH64_OPND_QLF_X
6635 || offset_qualifier != AARCH64_OPND_QLF_X)
6636 {
6637 set_syntax_error (_("invalid addressing mode"));
6638 goto failure;
6639 }
6640 goto regoff_addr;
6641
6642 case AARCH64_OPND_SVE_ADDR_RZ:
6643 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6644 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6645 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6646 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6647 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6648 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6649 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6650 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6651 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6652 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6653 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6654 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6655 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6656 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6657 &offset_qualifier));
6658 if (base_qualifier != AARCH64_OPND_QLF_X
6659 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6660 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6661 {
6662 set_syntax_error (_("invalid addressing mode"));
6663 goto failure;
6664 }
6665 info->qualifier = offset_qualifier;
6666 goto regoff_addr;
6667
6668 case AARCH64_OPND_SVE_ADDR_ZX:
6669 /* [Zn.<T>{, <Xm>}]. */
6670 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6671 &offset_qualifier));
6672 /* Things to check:
6673 base_qualifier either S_S or S_D
6674 offset_qualifier must be X
6675 */
6676 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6677 && base_qualifier != AARCH64_OPND_QLF_S_D)
6678 || offset_qualifier != AARCH64_OPND_QLF_X)
6679 {
6680 set_syntax_error (_("invalid addressing mode"));
6681 goto failure;
6682 }
6683 info->qualifier = base_qualifier;
6684 if (!info->addr.offset.is_reg || info->addr.pcrel
6685 || !info->addr.preind || info->addr.writeback
6686 || info->shifter.operator_present != 0)
6687 {
6688 set_syntax_error (_("invalid addressing mode"));
6689 goto failure;
6690 }
6691 info->shifter.kind = AARCH64_MOD_LSL;
6692 break;
6693
6694
6695 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6696 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6697 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6698 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6699 /* [Z<n>.<T>{, #imm}] */
6700 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6701 &offset_qualifier));
6702 if (base_qualifier != AARCH64_OPND_QLF_S_S
6703 && base_qualifier != AARCH64_OPND_QLF_S_D)
6704 {
6705 set_syntax_error (_("invalid addressing mode"));
6706 goto failure;
6707 }
6708 info->qualifier = base_qualifier;
6709 goto sve_regimm;
6710
6711 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6712 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6713 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6714 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6715 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6716
6717 We don't reject:
6718
6719 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6720
6721 here since we get better error messages by leaving it to
6722 the qualifier checking routines. */
6723 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6724 &offset_qualifier));
6725 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6726 && base_qualifier != AARCH64_OPND_QLF_S_D)
6727 || offset_qualifier != base_qualifier)
6728 {
6729 set_syntax_error (_("invalid addressing mode"));
6730 goto failure;
6731 }
6732 info->qualifier = base_qualifier;
6733 goto regoff_addr;
6734
6735 case AARCH64_OPND_SYSREG:
6736 {
6737 uint32_t sysreg_flags;
6738 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6739 &sysreg_flags)) == PARSE_FAIL)
6740 {
6741 set_syntax_error (_("unknown or missing system register name"));
6742 goto failure;
6743 }
6744 inst.base.operands[i].sysreg.value = val;
6745 inst.base.operands[i].sysreg.flags = sysreg_flags;
6746 break;
6747 }
6748
6749 case AARCH64_OPND_PSTATEFIELD:
6750 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6751 == PARSE_FAIL)
6752 {
6753 set_syntax_error (_("unknown or missing PSTATE field name"));
6754 goto failure;
6755 }
6756 inst.base.operands[i].pstatefield = val;
6757 break;
6758
6759 case AARCH64_OPND_SYSREG_IC:
6760 inst.base.operands[i].sysins_op =
6761 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6762 goto sys_reg_ins;
6763
6764 case AARCH64_OPND_SYSREG_DC:
6765 inst.base.operands[i].sysins_op =
6766 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6767 goto sys_reg_ins;
6768
6769 case AARCH64_OPND_SYSREG_AT:
6770 inst.base.operands[i].sysins_op =
6771 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6772 goto sys_reg_ins;
6773
6774 case AARCH64_OPND_SYSREG_SR:
6775 inst.base.operands[i].sysins_op =
6776 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6777 goto sys_reg_ins;
6778
6779 case AARCH64_OPND_SYSREG_TLBI:
6780 inst.base.operands[i].sysins_op =
6781 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6782 sys_reg_ins:
6783 if (inst.base.operands[i].sysins_op == NULL)
6784 {
6785 set_fatal_syntax_error ( _("unknown or missing operation name"));
6786 goto failure;
6787 }
6788 break;
6789
6790 case AARCH64_OPND_BARRIER:
6791 case AARCH64_OPND_BARRIER_ISB:
6792 val = parse_barrier (&str);
6793 if (val != PARSE_FAIL
6794 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6795 {
6796 /* ISB only accepts options name 'sy'. */
6797 set_syntax_error
6798 (_("the specified option is not accepted in ISB"));
6799 /* Turn off backtrack as this optional operand is present. */
6800 backtrack_pos = 0;
6801 goto failure;
6802 }
6803 if (val != PARSE_FAIL
6804 && operands[i] == AARCH64_OPND_BARRIER)
6805 {
6806 /* Regular barriers accept options CRm (C0-C15).
6807 DSB nXS barrier variant accepts values > 15. */
6808 if (val < 0 || val > 15)
6809 {
6810 set_syntax_error (_("the specified option is not accepted in DSB"));
6811 goto failure;
6812 }
6813 }
6814 /* This is an extension to accept a 0..15 immediate. */
6815 if (val == PARSE_FAIL)
6816 po_imm_or_fail (0, 15);
6817 info->barrier = aarch64_barrier_options + val;
6818 break;
6819
6820 case AARCH64_OPND_BARRIER_DSB_NXS:
6821 val = parse_barrier (&str);
6822 if (val != PARSE_FAIL)
6823 {
6824 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
6825 if (!(val == 16 || val == 20 || val == 24 || val == 28))
6826 {
6827 set_syntax_error (_("the specified option is not accepted in DSB"));
6828 /* Turn off backtrack as this optional operand is present. */
6829 backtrack_pos = 0;
6830 goto failure;
6831 }
6832 }
6833 else
6834 {
6835 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
6836 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
6837 if (! parse_constant_immediate (&str, &val, imm_reg_type))
6838 goto failure;
6839 if (!(val == 16 || val == 20 || val == 24 || val == 28))
6840 {
6841 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
6842 goto failure;
6843 }
6844 }
6845 /* Option index is encoded as 2-bit value in val<3:2>. */
6846 val = (val >> 2) - 4;
6847 info->barrier = aarch64_barrier_dsb_nxs_options + val;
6848 break;
6849
6850 case AARCH64_OPND_PRFOP:
6851 val = parse_pldop (&str);
6852 /* This is an extension to accept a 0..31 immediate. */
6853 if (val == PARSE_FAIL)
6854 po_imm_or_fail (0, 31);
6855 inst.base.operands[i].prfop = aarch64_prfops + val;
6856 break;
6857
6858 case AARCH64_OPND_BARRIER_PSB:
6859 val = parse_barrier_psb (&str, &(info->hint_option));
6860 if (val == PARSE_FAIL)
6861 goto failure;
6862 break;
6863
6864 case AARCH64_OPND_BTI_TARGET:
6865 val = parse_bti_operand (&str, &(info->hint_option));
6866 if (val == PARSE_FAIL)
6867 goto failure;
6868 break;
6869
6870 default:
6871 as_fatal (_("unhandled operand code %d"), operands[i]);
6872 }
6873
6874 /* If we get here, this operand was successfully parsed. */
6875 inst.base.operands[i].present = 1;
6876 continue;
6877
6878 failure:
6879 /* The parse routine should already have set the error, but in case
6880 not, set a default one here. */
6881 if (! error_p ())
6882 set_default_error ();
6883
6884 if (! backtrack_pos)
6885 goto parse_operands_return;
6886
6887 {
6888 /* We reach here because this operand is marked as optional, and
6889 either no operand was supplied or the operand was supplied but it
6890 was syntactically incorrect. In the latter case we report an
6891 error. In the former case we perform a few more checks before
6892 dropping through to the code to insert the default operand. */
6893
6894 char *tmp = backtrack_pos;
6895 char endchar = END_OF_INSN;
6896
6897 if (i != (aarch64_num_of_operands (opcode) - 1))
6898 endchar = ',';
6899 skip_past_char (&tmp, ',');
6900
6901 if (*tmp != endchar)
6902 /* The user has supplied an operand in the wrong format. */
6903 goto parse_operands_return;
6904
6905 /* Make sure there is not a comma before the optional operand.
6906 For example the fifth operand of 'sys' is optional:
6907
6908 sys #0,c0,c0,#0, <--- wrong
6909 sys #0,c0,c0,#0 <--- correct. */
6910 if (comma_skipped_p && i && endchar == END_OF_INSN)
6911 {
6912 set_fatal_syntax_error
6913 (_("unexpected comma before the omitted optional operand"));
6914 goto parse_operands_return;
6915 }
6916 }
6917
6918 /* Reaching here means we are dealing with an optional operand that is
6919 omitted from the assembly line. */
6920 gas_assert (optional_operand_p (opcode, i));
6921 info->present = 0;
6922 process_omitted_operand (operands[i], opcode, i, info);
6923
6924 /* Try again, skipping the optional operand at backtrack_pos. */
6925 str = backtrack_pos;
6926 backtrack_pos = 0;
6927
6928 /* Clear any error record after the omitted optional operand has been
6929 successfully handled. */
6930 clear_error ();
6931 }
6932
6933 /* Check if we have parsed all the operands. */
6934 if (*str != '\0' && ! error_p ())
6935 {
6936 /* Set I to the index of the last present operand; this is
6937 for the purpose of diagnostics. */
6938 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6939 ;
6940 set_fatal_syntax_error
6941 (_("unexpected characters following instruction"));
6942 }
6943
6944 parse_operands_return:
6945
6946 if (error_p ())
6947 {
6948 DEBUG_TRACE ("parsing FAIL: %s - %s",
6949 operand_mismatch_kind_names[get_error_kind ()],
6950 get_error_message ());
6951 /* Record the operand error properly; this is useful when there
6952 are multiple instruction templates for a mnemonic name, so that
6953 later on, we can select the error that most closely describes
6954 the problem. */
6955 record_operand_error (opcode, i, get_error_kind (),
6956 get_error_message ());
6957 return false;
6958 }
6959 else
6960 {
6961 DEBUG_TRACE ("parsing SUCCESS");
6962 return true;
6963 }
6964 }
6965
6966 /* It does some fix-up to provide some programmer friendly feature while
6967 keeping the libopcodes happy, i.e. libopcodes only accepts
6968 the preferred architectural syntax.
6969 Return FALSE if there is any failure; otherwise return TRUE. */
6970
static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* The W form only exposes bits 0..31; anything larger is a
	     hard error rather than a silent widening.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  /* Libopcodes expects the architectural X form; accept Wn here
	     as a programmer convenience and canonicalize.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW loads a 32-bit literal regardless of the Xt qualifier.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	/* The extended-register operand is always the second or third
	   operand in this instruction class.  */
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
7073
7074 /* Check for loads and stores that will cause unpredictable behavior. */
7075
static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 For pair instructions operand 2 is the address, 0 and 1 the
	 transfer registers.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 NOTE(review): bit 22 of the encoding is used here as the
	 load/store selector -- confirm against the opcode table.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.
		 Bit 21 appears to select the pair variant here.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7169
7170 static void
7171 force_automatic_sequence_close (void)
7172 {
7173 if (now_instr_sequence.instr)
7174 {
7175 as_warn (_("previous `%s' sequence has not been closed"),
7176 now_instr_sequence.instr->opcode->name);
7177 init_insn_sequence (NULL, &now_instr_sequence);
7178 }
7179 }
7180
7181 /* A wrapper function to interface with libopcodes on encoding and
7182 record the error message if there is any.
7183
7184 Return TRUE on success; otherwise return FALSE. */
7185
7186 static bool
7187 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7188 aarch64_insn *code)
7189 {
7190 aarch64_operand_error error_info;
7191 memset (&error_info, '\0', sizeof (error_info));
7192 error_info.kind = AARCH64_OPDE_NIL;
7193 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7194 && !error_info.non_fatal)
7195 return true;
7196
7197 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7198 record_operand_error_info (opcode, &error_info);
7199 return error_info.non_fatal;
7200 }
7201
#ifdef DEBUG_AARCH64
/* Debug helper: print the name (or, when no name is available, the
   description) of every operand slot of OPCODE.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  int idx;

  for (idx = 0; opcode->operands[idx] != AARCH64_OPND_NIL; idx++)
    {
      const char *text = aarch64_get_operand_name (opcode->operands[idx]);

      if (text[0] == '\0')
	text = aarch64_get_operand_desc (opcode->operands[idx]);
      aarch64_verbose ("\t\t opnd%d: %s", idx, text);
    }
}
#endif /* DEBUG_AARCH64 */
7217
7218 /* This is the guts of the machine-dependent assembler. STR points to a
7219 machine dependent instruction. This function is supposed to emit
7220 the frags/bytes it assembles to. */
7221
void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  const aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* P is advanced past the mnemonic; TEMPLATE heads the list of opcode
     entries sharing that mnemonic.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* reset_aarch64_instruction clears INST, including the condition code
     parsed from the mnemonic suffix, so preserve it across the reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, apply programmer-friendly canonicalizations, then hand to
	 libopcodes for encoding; any stage may record an error and fail.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; reset the instruction state before trying
	 the next entry for the same mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* No template matched; issue the recorded error messages, if any.  */
  output_operand_error_report (str, false);
}
7357
7358 /* Various frobbings of labels and their addresses. */
7359
/* Hook run at the start of each assembly line: forget any label seen on
   a previous line so it is not re-aligned by md_assemble.  */
void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
7365
/* Hook run when a label SYM is defined: remember it (md_assemble may
   re-align it) and emit it to the DWARF-2 line information.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
7373
/* Hook run when changing sections.  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
7380
7381 int
7382 aarch64_data_in_code (void)
7383 {
7384 if (startswith (input_line_pointer + 1, "data:"))
7385 {
7386 *input_line_pointer = '/';
7387 input_line_pointer += 5;
7388 *input_line_pointer = 0;
7389 return 1;
7390 }
7391
7392 return 0;
7393 }
7394
/* Canonicalize NAME in place by stripping a trailing "/data" suffix
   (the marker attached by aarch64_data_in_code).  Returns NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  /* Only strip a proper suffix; a name that is exactly "/data" (or
     shorter) is left alone.  */
  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
7405 \f
7406 /* Table of all register names defined by default. The user can
7407 define additional names with .req. Note that all register names
7408 should appear in both upper and lowercase variants. Some registers
7409 also have mixed-case names. */
7410
/* REGDEF defines a canonical register entry (last field true);
   REGDEF_ALIAS defines a secondary name for the same number (false).
   REGNUM pastes prefix and number into a name; REGSET16/REGSET31/REGSET
   expand to 16, 31 or 32 consecutively numbered entries.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  Note only 0-30: number 31 is covered by the
     separate wsp/sp and wzr/xzr entries below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  /* Conventional aliases for specific integer registers.  */
  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers (only p0-p15 exist).  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7475
/* Table of the 16 spellings of the NZCV operand: an uppercase letter
   sets the corresponding flag bit, lowercase clears it.  B packs the
   four flags into a 4-bit value with N in bit 3 down to V in bit 0.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7513 \f
7514 /* MD interface: bits in the object file. */
7515
7516 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7517 for use in the a.out file, and stores them in the array pointed to by buf.
7518 This knows about the endian-ness of the target machine and does
7519 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7520 2 (short) and 4 (long) Floating numbers are put out as a series of
7521 LITTLENUMS (shorts, here at least). */
7522
7523 void
7524 md_number_to_chars (char *buf, valueT val, int n)
7525 {
7526 if (target_big_endian)
7527 number_to_chars_bigendian (buf, val, n);
7528 else
7529 number_to_chars_littleendian (buf, val, n);
7530 }
7531
7532 /* MD interface: Sections. */
7533
7534 /* Estimate the size of a frag before relaxing. Assume everything fits in
7535 4 bytes. */
7536
int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* All AArch64 instructions are 4 bytes, so no relaxation is needed.  */
  fragp->fr_var = 4;
  return 4;
}
7543
7544 /* Round up a section size to the appropriate boundary. */
7545
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No extra rounding: sections keep their natural size.  */
  return size;
}
7551
7552 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7553 of an rs_align_code fragment.
7554
7555 Here we fill the frag with the appropriate info for padding the
7556 output stream. The resulting frag will consist of a fixed (fr_fix)
7557 and of a repeating (fr_var) part.
7558
7559 The fixed content is always emitted before the repeating content and
7560 these two parts are used as follows in constructing the output:
7561 - the fixed part will be used to align to a valid instruction word
7562 boundary, in case that we start at a misaligned address; as no
7563 executable instruction can live at the misaligned location, we
7564 simply fill with zeros;
7565 - the variable part will be used to cover the remaining padding and
7566 we fill using the AArch64 NOP instruction.
7567
7568 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7569 enough storage space for up to 3 bytes for padding the back to a valid
7570 instruction alignment and exactly 4 bytes to store the NOP pattern. */
7571
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed, and where in the frag it starts.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* FIX is the number of bytes needed to reach instruction alignment;
     they cannot hold an instruction, so fill them with zeros.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The variable part repeats a single NOP to cover the rest.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
7609
7610 /* Perform target specific initialisation of a frag.
7611 Note - despite the name this initialisation is not done when the frag
7612 is created, but only when its type is assigned. A frag can be created
7613 and used a long time before its type is set, so beware of assuming that
7614 this initialisation is performed first. */
7615
#ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag mapping-symbol state, so there is
   nothing to initialise here.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state ($x/$d) appropriate for the frag type.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
7655 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* Initially the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
7663 #endif /* OBJ_ELF */
7664
7665 /* Convert REGNAME to a DWARF-2 register number. */
7666
7667 int
7668 tc_aarch64_regname_to_dw2regnum (char *regname)
7669 {
7670 const reg_entry *reg = parse_reg (&regname);
7671 if (reg == NULL)
7672 return -1;
7673
7674 switch (reg->type)
7675 {
7676 case REG_TYPE_SP_32:
7677 case REG_TYPE_SP_64:
7678 case REG_TYPE_R_32:
7679 case REG_TYPE_R_64:
7680 return reg->number;
7681
7682 case REG_TYPE_FP_B:
7683 case REG_TYPE_FP_H:
7684 case REG_TYPE_FP_S:
7685 case REG_TYPE_FP_D:
7686 case REG_TYPE_FP_Q:
7687 return reg->number + 64;
7688
7689 default:
7690 break;
7691 }
7692 return -1;
7693 }
7694
7695 /* Implement DWARF2_ADDR_SIZE. */
7696
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses even though the architecture is 64-bit.  */
  if (ilp32_p)
    return 4;
#endif
  /* Otherwise derive the address size from the output BFD.  */
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7706
7707 /* MD interface: Symbol and relocation handling. */
7708
7709 /* Return the address within the segment that a PC-relative fixup is
7710 relative to. For AArch64 PC-relative fixups applied to instructions
7711 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7712
7713 long
7714 md_pcrel_from_section (fixS * fixP, segT seg)
7715 {
7716 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7717
7718 /* If this is pc-relative and we are going to emit a relocation
7719 then we just want to put out any pipeline compensation that the linker
7720 will need. Otherwise we want to use the calculated base. */
7721 if (fixP->fx_pcrel
7722 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7723 || aarch64_force_relocation (fixP)))
7724 base = 0;
7725
7726 /* AArch64 should be consistent for all pc-relative relocations. */
7727 return base + AARCH64_PCREL_OFFSET;
7728 }
7729
7730 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
7731 Otherwise we have no need to default values of symbols. */
7732
symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character prefix test before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  /* The GOT symbol must not already exist as a regular symbol.  */
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
7755
7756 /* Return non-zero if the indicated VALUE has overflowed the maximum
7757 range expressible by a unsigned number with the indicated number of
7758 BITS. */
7759
7760 static bool
7761 unsigned_overflow (valueT value, unsigned bits)
7762 {
7763 valueT lim;
7764 if (bits >= sizeof (valueT) * 8)
7765 return false;
7766 lim = (valueT) 1 << bits;
7767 return (value >= lim);
7768 }
7769
7770
7771 /* Return non-zero if the indicated VALUE has overflowed the maximum
7772 range expressible by an signed number with the indicated number of
7773 BITS. */
7774
7775 static bool
7776 signed_overflow (offsetT value, unsigned bits)
7777 {
7778 offsetT lim;
7779 if (bits >= sizeof (offsetT) * 8)
7780 return false;
7781 lim = (offsetT) 1 << (bits - 1);
7782 return (value < -lim || value >= lim);
7783 }
7784
7785 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7786 unsigned immediate offset load/store instruction, try to encode it as
7787 an unscaled, 9-bit, signed immediate offset load/store instruction.
7788 Return TRUE if it is successful; otherwise return FALSE.
7789
7790 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7791 in response to the standard LDR/STR mnemonics when the immediate offset is
7792 unambiguous, i.e. when it is negative or unaligned. */
7793
static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled (positive-offset) load/store to its unscaled
     LDUR/STUR counterpart; OP_NIL marks "no equivalent".  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS: new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode under the new opcode; failure means the offset does not
     fit the 9-bit signed form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
7847
7848 /* Called by fix_insn to fix a MOV immediate alias instruction.
7849
7850 Operand for a generic move immediate instruction, which is an alias
7851 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
7852 a 32-bit/64-bit immediate value into general register. An assembler error
7853 shall result if the immediate cannot be created by a single one of these
7854 instructions. If there is a choice, then to ensure reversability an
7855 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
7856
7857 static void
7858 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
7859 {
7860 const aarch64_opcode *opcode;
7861
7862 /* Need to check if the destination is SP/ZR. The check has to be done
7863 before any aarch64_replace_opcode. */
7864 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
7865 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
7866
7867 instr->operands[1].imm.value = value;
7868 instr->operands[1].skip = 0;
7869
7870 if (try_mov_wide_p)
7871 {
7872 /* Try the MOVZ alias. */
7873 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
7874 aarch64_replace_opcode (instr, opcode);
7875 if (aarch64_opcode_encode (instr->opcode, instr,
7876 &instr->value, NULL, NULL, insn_sequence))
7877 {
7878 put_aarch64_insn (buf, instr->value);
7879 return;
7880 }
7881 /* Try the MOVK alias. */
7882 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
7883 aarch64_replace_opcode (instr, opcode);
7884 if (aarch64_opcode_encode (instr->opcode, instr,
7885 &instr->value, NULL, NULL, insn_sequence))
7886 {
7887 put_aarch64_insn (buf, instr->value);
7888 return;
7889 }
7890 }
7891
7892 if (try_mov_bitmask_p)
7893 {
7894 /* Try the ORR alias. */
7895 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
7896 aarch64_replace_opcode (instr, opcode);
7897 if (aarch64_opcode_encode (instr->opcode, instr,
7898 &instr->value, NULL, NULL, insn_sequence))
7899 {
7900 put_aarch64_insn (buf, instr->value);
7901 return;
7902 }
7903 }
7904
7905 as_bad_where (fixP->fx_file, fixP->fx_line,
7906 _("immediate cannot be moved by a single instruction"));
7907 }
7908
7909 /* An instruction operand which is immediate related may have symbol used
7910 in the assembly, e.g.
7911
7912 mov w0, u32
7913 .set u32, 0x00ffff00
7914
7915 At the time when the assembly instruction is parsed, a referenced symbol,
7916 like 'u32' in the above example may not have been seen; a fixS is created
7917 in such a case and is handled here after symbols have been resolved.
7918 Instruction is fixed up with VALUE using the information in *FIXP plus
7919 extra information in FLAGS.
7920
7921 This function is called by md_apply_fix to fix up instructions that need
7922 a fix-up described above but does not involve any linker-time relocation. */
7923
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Address of the instruction word inside the frag's literal buffer.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate; SVC-class immediates need their own encoder.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6 Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6 Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6 Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6 Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2  221111111111
		  1  098|7654|3 2  109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      /* As a fallback, an unaligned or negative LDR/STR offset may still
	 be representable as an unscaled LDUR/STUR.  */
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8087
8088 /* Apply a fixup (fixP) to segment data, once it has been determined
8089 by our caller that we have all the info we need to fix it up.
8090
8091 Parameter valP is the pointer to the value of the bits. */
8092
void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* Address of the bytes being patched inside the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* 19-bit word-scaled offset for LDR (literal).  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* 21-bit byte offset for ADR.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* 19-bit word-scaled offset for conditional branches.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* 14-bit word-scaled offset for TBZ/TBNZ.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* 26-bit word-scaled offset for B/BL.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* The MOVW group: SCALE selects which 16-bit slice of the value
       goes into the instruction's imm16 field.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale. */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn. */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pick the 32- or 64-bit variant depending on the ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Pick the 32- or 64-bit variant depending on the ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated the struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually use
     this field, so the impact on the performance should be minimal .  */
  free (fixP->tc_fix_data.inst);

  return;
}
8474
8475 /* Translate internal representation of relocation info to BFD target
8476 format. */
8477
8478 arelent *
8479 tc_gen_reloc (asection * section, fixS * fixp)
8480 {
8481 arelent *reloc;
8482 bfd_reloc_code_real_type code;
8483
8484 reloc = XNEW (arelent);
8485
8486 reloc->sym_ptr_ptr = XNEW (asymbol *);
8487 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8488 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8489
8490 if (fixp->fx_pcrel)
8491 {
8492 if (section->use_rela_p)
8493 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8494 else
8495 fixp->fx_offset = reloc->address;
8496 }
8497 reloc->addend = fixp->fx_offset;
8498
8499 code = fixp->fx_r_type;
8500 switch (code)
8501 {
8502 case BFD_RELOC_16:
8503 if (fixp->fx_pcrel)
8504 code = BFD_RELOC_16_PCREL;
8505 break;
8506
8507 case BFD_RELOC_32:
8508 if (fixp->fx_pcrel)
8509 code = BFD_RELOC_32_PCREL;
8510 break;
8511
8512 case BFD_RELOC_64:
8513 if (fixp->fx_pcrel)
8514 code = BFD_RELOC_64_PCREL;
8515 break;
8516
8517 default:
8518 break;
8519 }
8520
8521 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8522 if (reloc->howto == NULL)
8523 {
8524 as_bad_where (fixp->fx_file, fixp->fx_line,
8525 _
8526 ("cannot represent %s relocation in this object file format"),
8527 bfd_get_reloc_code_name (code));
8528 return NULL;
8529 }
8530
8531 return reloc;
8532 }
8533
8534 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8535
8536 void
8537 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8538 {
8539 bfd_reloc_code_real_type type;
8540 int pcrel = 0;
8541
8542 /* Pick a reloc.
8543 FIXME: @@ Should look at CPU word size. */
8544 switch (size)
8545 {
8546 case 1:
8547 type = BFD_RELOC_8;
8548 break;
8549 case 2:
8550 type = BFD_RELOC_16;
8551 break;
8552 case 4:
8553 type = BFD_RELOC_32;
8554 break;
8555 case 8:
8556 type = BFD_RELOC_64;
8557 break;
8558 default:
8559 as_bad (_("cannot do %u-byte relocation"), size);
8560 type = BFD_RELOC_UNUSED;
8561 break;
8562 }
8563
8564 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8565 }
8566
8567 #ifdef OBJ_ELF
8568
8569 /* Implement md_after_parse_args. This is the earliest time we need to decide
8570 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8571
8572 void
8573 aarch64_after_parse_args (void)
8574 {
8575 if (aarch64_abi != AARCH64_ABI_NONE)
8576 return;
8577
8578 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8579 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8580 aarch64_abi = AARCH64_ABI_ILP32;
8581 else
8582 aarch64_abi = AARCH64_ABI_LP64;
8583 }
8584
/* Return the BFD target name matching the configured endianness and,
   outside CloudABI, the selected (LP64 vs ILP32) ABI.  */
const char *
elf64_aarch64_target_format (void)
{
#ifdef TE_CLOUDABI
  /* FIXME: What to do for ilp32_p ?  */
  if (target_big_endian)
    return "elf64-bigaarch64-cloudabi";
  else
    return "elf64-littleaarch64-cloudabi";
#else
  if (target_big_endian)
    return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
  else
    return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
#endif
}
8601
/* Target hook run over each symbol at write-out time; simply defers to
   the generic ELF symbol frobbing.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8607 #endif
8608
8609 /* MD interface: Finalization. */
8610
8611 /* A good place to do this, although this was probably not intended
8612 for this kind of use. We need to dump the literal pool before
8613 references are made to a null symbol pointer. */
8614
8615 void
8616 aarch64_cleanup (void)
8617 {
8618 literal_pool *pool;
8619
8620 for (pool = list_of_pools; pool; pool = pool->next)
8621 {
8622 /* Put it at the end of the relevant section. */
8623 subseg_set (pool->section, pool->sub_section);
8624 s_ltorg (0);
8625 }
8626 }
8627
#ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections from
   aarch64_adjust_symtab; ABFD and DUMMY are unused.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain (e.g. bfd-internal
     sections).  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* SYM is the last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the next frag; decide
	 whether a later mapping symbol makes it redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
#endif
8693
/* Adjust the symbol table.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, NULL);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8706
/* Insert VALUE into TABLE under KEY, without replacing any entry already
   present for KEY (final argument 0 = no replace).  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
8712
/* Like checked_hash_insert, but for system register names: additionally
   assert that KEY fits within AARCH64_MAX_SYSREG_NAME_LEN.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
8719
8720 static void
8721 fill_instruction_hash_table (void)
8722 {
8723 const aarch64_opcode *opcode = aarch64_opcode_table;
8724
8725 while (opcode->name != NULL)
8726 {
8727 templates *templ, *new_templ;
8728 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
8729
8730 new_templ = XNEW (templates);
8731 new_templ->opcode = opcode;
8732 new_templ->next = NULL;
8733
8734 if (!templ)
8735 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8736 else
8737 {
8738 new_templ->next = templ->next;
8739 templ->next = new_templ;
8740 }
8741 ++opcode;
8742 }
8743 }
8744
8745 static inline void
8746 convert_to_upper (char *dst, const char *src, size_t num)
8747 {
8748 unsigned int i;
8749 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8750 *dst = TOUPPER (*src);
8751 *dst = '\0';
8752 }
8753
8754 /* Assume STR point to a lower-case string, allocate, convert and return
8755 the corresponding upper-case string. */
8756 static inline const char*
8757 get_upper_str (const char *str)
8758 {
8759 char *ret;
8760 size_t len = strlen (str);
8761 ret = XNEWVEC (char, len + 1);
8762 convert_to_upper (ret, str, len);
8763 return ret;
8764 }
8765
/* MD interface: Initialization.  */

/* Build every lookup table the assembler needs, then settle the active
   CPU feature set and record the machine type in the output BFD.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables that drive mnemonic, operand and system
     register lookup during assembly.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and PSTATE fields, keyed by name.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  /* Operand names of the IC, DC, AT and TLBI system instructions, and
     the speculation-restriction (SR) operands.  */
  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* Register names and NZCV flag-set names.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend modifiers, condition codes, barrier options, prefetch
     operations and hint options are all entered twice: once as written
     in their tables and once upper-cased via get_upper_str.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.
     -mcpu wins over -march; with neither, fall back to the configured
     default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
8926
/* Command line processing.  */

/* Only -m<...> style short options carry a payload; see md_parse_option.  */
const char *md_shortopts = "m:";

/* Endianness selection flags: a bi-endian assembler accepts both -EB and
   -EL; otherwise only the flag matching the configured default byte order
   is defined.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8953
/* Table entry for a simple flag-style "-m<name>" option that just sets an
   integer variable; matched by md_parse_option.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* The AArch64-specific flag options.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8976
/* Table entry mapping a CPU name (-mcpu= / .cpu) to the feature set it
   implies.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Each entry's feature set is the base architecture
   plus the optional extensions that CPU implements.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9145
/* Table entry mapping an architecture name (-march= / .arch) to its base
   feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {NULL, AARCH64_ARCH_NONE}
};
9168
/* ISA extensions.  */
/* Table entry for a "+ext"/"+noext" architectural extension: VALUE is the
   feature bit(s) the extension enables, REQUIRE the features it depends
   on (used by aarch64_feature_enable_set/aarch64_feature_disable_set to
   compute transitive closures).  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* The extensions accepted after an architecture or CPU name.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME
					 | AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME
					 | AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_ARCH_NONE},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_ARCH_NONE},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
9274
/* Table entry for an option that carries its own sub-argument, e.g.
   "-march=<arch>"; FUNC parses the text after the matched prefix.  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
9282
9283 /* Transitive closure of features depending on set. */
9284 static aarch64_feature_set
9285 aarch64_feature_disable_set (aarch64_feature_set set)
9286 {
9287 const struct aarch64_option_cpu_value_table *opt;
9288 aarch64_feature_set prev = 0;
9289
9290 while (prev != set) {
9291 prev = set;
9292 for (opt = aarch64_features; opt->name != NULL; opt++)
9293 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9294 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9295 }
9296 return set;
9297 }
9298
9299 /* Transitive closure of dependencies of set. */
9300 static aarch64_feature_set
9301 aarch64_feature_enable_set (aarch64_feature_set set)
9302 {
9303 const struct aarch64_option_cpu_value_table *opt;
9304 aarch64_feature_set prev = 0;
9305
9306 while (prev != set) {
9307 prev = set;
9308 for (opt = aarch64_features; opt->name != NULL; opt++)
9309 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9310 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9311 }
9312 return set;
9313 }
9314
9315 static int
9316 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9317 bool ext_only)
9318 {
9319 /* We insist on extensions being added before being removed. We achieve
9320 this by using the ADDING_VALUE variable to indicate whether we are
9321 adding an extension (1) or removing it (0) and only allowing it to
9322 change in the order -1 -> 1 -> 0. */
9323 int adding_value = -1;
9324 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9325
9326 /* Copy the feature set, so that we can modify it. */
9327 *ext_set = **opt_p;
9328 *opt_p = ext_set;
9329
9330 while (str != NULL && *str != 0)
9331 {
9332 const struct aarch64_option_cpu_value_table *opt;
9333 const char *ext = NULL;
9334 int optlen;
9335
9336 if (!ext_only)
9337 {
9338 if (*str != '+')
9339 {
9340 as_bad (_("invalid architectural extension"));
9341 return 0;
9342 }
9343
9344 ext = strchr (++str, '+');
9345 }
9346
9347 if (ext != NULL)
9348 optlen = ext - str;
9349 else
9350 optlen = strlen (str);
9351
9352 if (optlen >= 2 && startswith (str, "no"))
9353 {
9354 if (adding_value != 0)
9355 adding_value = 0;
9356 optlen -= 2;
9357 str += 2;
9358 }
9359 else if (optlen > 0)
9360 {
9361 if (adding_value == -1)
9362 adding_value = 1;
9363 else if (adding_value != 1)
9364 {
9365 as_bad (_("must specify extensions to add before specifying "
9366 "those to remove"));
9367 return false;
9368 }
9369 }
9370
9371 if (optlen == 0)
9372 {
9373 as_bad (_("missing architectural extension"));
9374 return 0;
9375 }
9376
9377 gas_assert (adding_value != -1);
9378
9379 for (opt = aarch64_features; opt->name != NULL; opt++)
9380 if (strncmp (opt->name, str, optlen) == 0)
9381 {
9382 aarch64_feature_set set;
9383
9384 /* Add or remove the extension. */
9385 if (adding_value)
9386 {
9387 set = aarch64_feature_enable_set (opt->value);
9388 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9389 }
9390 else
9391 {
9392 set = aarch64_feature_disable_set (opt->value);
9393 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9394 }
9395 break;
9396 }
9397
9398 if (opt->name == NULL)
9399 {
9400 as_bad (_("unknown architectural extension `%s'"), str);
9401 return 0;
9402 }
9403
9404 str = ext;
9405 };
9406
9407 return 1;
9408 }
9409
9410 static int
9411 aarch64_parse_cpu (const char *str)
9412 {
9413 const struct aarch64_cpu_option_table *opt;
9414 const char *ext = strchr (str, '+');
9415 size_t optlen;
9416
9417 if (ext != NULL)
9418 optlen = ext - str;
9419 else
9420 optlen = strlen (str);
9421
9422 if (optlen == 0)
9423 {
9424 as_bad (_("missing cpu name `%s'"), str);
9425 return 0;
9426 }
9427
9428 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9429 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9430 {
9431 mcpu_cpu_opt = &opt->value;
9432 if (ext != NULL)
9433 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
9434
9435 return 1;
9436 }
9437
9438 as_bad (_("unknown cpu `%s'"), str);
9439 return 0;
9440 }
9441
9442 static int
9443 aarch64_parse_arch (const char *str)
9444 {
9445 const struct aarch64_arch_option_table *opt;
9446 const char *ext = strchr (str, '+');
9447 size_t optlen;
9448
9449 if (ext != NULL)
9450 optlen = ext - str;
9451 else
9452 optlen = strlen (str);
9453
9454 if (optlen == 0)
9455 {
9456 as_bad (_("missing architecture name `%s'"), str);
9457 return 0;
9458 }
9459
9460 for (opt = aarch64_archs; opt->name != NULL; opt++)
9461 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9462 {
9463 march_cpu_opt = &opt->value;
9464 if (ext != NULL)
9465 return aarch64_parse_features (ext, &march_cpu_opt, false);
9466
9467 return 1;
9468 }
9469
9470 as_bad (_("unknown architecture `%s'\n"), str);
9471 return 0;
9472 }
9473
/* ABIs.  */
/* Table entry mapping an -mabi= name to its data-model enumerator.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* The data models accepted by -mabi=.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
9485
9486 static int
9487 aarch64_parse_abi (const char *str)
9488 {
9489 unsigned int i;
9490
9491 if (str[0] == '\0')
9492 {
9493 as_bad (_("missing abi name `%s'"), str);
9494 return 0;
9495 }
9496
9497 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9498 if (strcmp (str, aarch64_abis[i].name) == 0)
9499 {
9500 aarch64_abi = aarch64_abis[i].value;
9501 return 1;
9502 }
9503
9504 as_bad (_("unknown abi `%s'\n"), str);
9505 return 0;
9506 }
9507
/* The "-m<name>=<value>" options, dispatched by md_parse_option; each
   entry's func receives the text following the '='.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
9519
9520 int
9521 md_parse_option (int c, const char *arg)
9522 {
9523 struct aarch64_option_table *opt;
9524 struct aarch64_long_option_table *lopt;
9525
9526 switch (c)
9527 {
9528 #ifdef OPTION_EB
9529 case OPTION_EB:
9530 target_big_endian = 1;
9531 break;
9532 #endif
9533
9534 #ifdef OPTION_EL
9535 case OPTION_EL:
9536 target_big_endian = 0;
9537 break;
9538 #endif
9539
9540 case 'a':
9541 /* Listing option. Just ignore these, we don't support additional
9542 ones. */
9543 return 0;
9544
9545 default:
9546 for (opt = aarch64_opts; opt->option != NULL; opt++)
9547 {
9548 if (c == opt->option[0]
9549 && ((arg == NULL && opt->option[1] == 0)
9550 || streq (arg, opt->option + 1)))
9551 {
9552 /* If the option is deprecated, tell the user. */
9553 if (opt->deprecated != NULL)
9554 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
9555 arg ? arg : "", _(opt->deprecated));
9556
9557 if (opt->var != NULL)
9558 *opt->var = opt->value;
9559
9560 return 1;
9561 }
9562 }
9563
9564 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9565 {
9566 /* These options are expected to have an argument. */
9567 if (c == lopt->option[0]
9568 && arg != NULL
9569 && startswith (arg, lopt->option + 1))
9570 {
9571 /* If the option is deprecated, tell the user. */
9572 if (lopt->deprecated != NULL)
9573 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
9574 _(lopt->deprecated));
9575
9576 /* Call the sup-option parser. */
9577 return lopt->func (arg + strlen (lopt->option) - 1);
9578 }
9579 }
9580
9581 return 0;
9582 }
9583
9584 return 1;
9585 }
9586
9587 void
9588 md_show_usage (FILE * fp)
9589 {
9590 struct aarch64_option_table *opt;
9591 struct aarch64_long_option_table *lopt;
9592
9593 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9594
9595 for (opt = aarch64_opts; opt->option != NULL; opt++)
9596 if (opt->help != NULL)
9597 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9598
9599 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9600 if (lopt->help != NULL)
9601 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
9602
9603 #ifdef OPTION_EB
9604 fprintf (fp, _("\
9605 -EB assemble code for a big-endian cpu\n"));
9606 #endif
9607
9608 #ifdef OPTION_EL
9609 fprintf (fp, _("\
9610 -EL assemble code for a little-endian cpu\n"));
9611 #endif
9612 }
9613
9614 /* Parse a .cpu directive. */
9615
9616 static void
9617 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9618 {
9619 const struct aarch64_cpu_option_table *opt;
9620 char saved_char;
9621 char *name;
9622 char *ext;
9623 size_t optlen;
9624
9625 name = input_line_pointer;
9626 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9627 input_line_pointer++;
9628 saved_char = *input_line_pointer;
9629 *input_line_pointer = 0;
9630
9631 ext = strchr (name, '+');
9632
9633 if (ext != NULL)
9634 optlen = ext - name;
9635 else
9636 optlen = strlen (name);
9637
9638 /* Skip the first "all" entry. */
9639 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9640 if (strlen (opt->name) == optlen
9641 && strncmp (name, opt->name, optlen) == 0)
9642 {
9643 mcpu_cpu_opt = &opt->value;
9644 if (ext != NULL)
9645 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
9646 return;
9647
9648 cpu_variant = *mcpu_cpu_opt;
9649
9650 *input_line_pointer = saved_char;
9651 demand_empty_rest_of_line ();
9652 return;
9653 }
9654 as_bad (_("unknown cpu `%s'"), name);
9655 *input_line_pointer = saved_char;
9656 ignore_rest_of_line ();
9657 }
9658
9659
9660 /* Parse a .arch directive. */
9661
9662 static void
9663 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9664 {
9665 const struct aarch64_arch_option_table *opt;
9666 char saved_char;
9667 char *name;
9668 char *ext;
9669 size_t optlen;
9670
9671 name = input_line_pointer;
9672 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9673 input_line_pointer++;
9674 saved_char = *input_line_pointer;
9675 *input_line_pointer = 0;
9676
9677 ext = strchr (name, '+');
9678
9679 if (ext != NULL)
9680 optlen = ext - name;
9681 else
9682 optlen = strlen (name);
9683
9684 /* Skip the first "all" entry. */
9685 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9686 if (strlen (opt->name) == optlen
9687 && strncmp (name, opt->name, optlen) == 0)
9688 {
9689 mcpu_cpu_opt = &opt->value;
9690 if (ext != NULL)
9691 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
9692 return;
9693
9694 cpu_variant = *mcpu_cpu_opt;
9695
9696 *input_line_pointer = saved_char;
9697 demand_empty_rest_of_line ();
9698 return;
9699 }
9700
9701 as_bad (_("unknown architecture `%s'\n"), name);
9702 *input_line_pointer = saved_char;
9703 ignore_rest_of_line ();
9704 }
9705
9706 /* Parse a .arch_extension directive. */
9707
9708 static void
9709 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9710 {
9711 char saved_char;
9712 char *ext = input_line_pointer;;
9713
9714 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9715 input_line_pointer++;
9716 saved_char = *input_line_pointer;
9717 *input_line_pointer = 0;
9718
9719 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
9720 return;
9721
9722 cpu_variant = *mcpu_cpu_opt;
9723
9724 *input_line_pointer = saved_char;
9725 demand_empty_rest_of_line ();
9726 }
9727
9728 /* Copy symbol information. */
9729
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  /* Propagate the AArch64-private per-symbol flag word from SRC to
     DEST.  */
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
9735
9736 #ifdef OBJ_ELF
9737 /* Same as elf_copy_symbol_attributes, but without copying st_other.
9738 This is needed so AArch64 specific st_other values can be independently
9739 specified for an IFUNC resolver (that is called by the dynamic linker)
9740 and the symbol it resolves (aliased to the resolver). In particular,
9741 if a function symbol has special st_other value set via directives,
9742 then attaching an IFUNC resolver to that symbol should not override
9743 the st_other setting. Requiring the directive on the IFUNC resolver
9744 symbol would be unexpected and problematic in C code, where the two
9745 symbols appear as two independent function declarations. */
9746
9747 void
9748 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
9749 {
9750 struct elf_obj_sy *srcelf = symbol_get_obj (src);
9751 struct elf_obj_sy *destelf = symbol_get_obj (dest);
9752 if (srcelf->size)
9753 {
9754 if (destelf->size == NULL)
9755 destelf->size = XNEW (expressionS);
9756 *destelf->size = *srcelf->size;
9757 }
9758 else
9759 {
9760 free (destelf->size);
9761 destelf->size = NULL;
9762 }
9763 S_SET_SIZE (dest, S_GET_SIZE (src));
9764 }
9765 #endif