]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
aarch64: [SME] Add MOV and MOVA instructions
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2021 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* SME horizontal or vertical slice indicator, encoded in "V".
103 Values:
104 0 - Horizontal
105 1 - vertical
106 */
107 enum sme_hv_slice
108 {
109 HV_horizontal = 0,
110 HV_vertical = 1
111 };
112
113 /* Bits for DEFINED field in vector_type_el. */
114 #define NTA_HASTYPE 1
115 #define NTA_HASINDEX 2
116 #define NTA_HASVARWIDTH 4
117
118 struct vector_type_el
119 {
120 enum vector_el_type type;
121 unsigned char defined;
122 unsigned width;
123 int64_t index;
124 };
125
126 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
127
128 struct reloc
129 {
130 bfd_reloc_code_real_type type;
131 expressionS exp;
132 int pc_rel;
133 enum aarch64_opnd opnd;
134 uint32_t flags;
135 unsigned need_libopcodes_p : 1;
136 };
137
138 struct aarch64_instruction
139 {
140 /* libopcodes structure for instruction intermediate representation. */
141 aarch64_inst base;
142 /* Record assembly errors found during the parsing. */
143 struct
144 {
145 enum aarch64_operand_error_kind kind;
146 const char *error;
147 } parsing_error;
148 /* The condition that appears in the assembly line. */
149 int cond;
150 /* Relocation information (including the GAS internal fixup). */
151 struct reloc reloc;
152 /* Need to generate an immediate in the literal pool. */
153 unsigned gen_lit_pool : 1;
154 };
155
156 typedef struct aarch64_instruction aarch64_instruction;
157
158 static aarch64_instruction inst;
159
160 static bool parse_operands (char *, const aarch64_opcode *);
161 static bool programmer_friendly_fixup (aarch64_instruction *);
162
163 #ifdef OBJ_ELF
164 # define now_instr_sequence seg_info \
165 (now_seg)->tc_segment_info_data.insn_sequence
166 #else
167 static struct aarch64_instr_sequence now_instr_sequence;
168 #endif
169
170 /* Diagnostics inline function utilities.
171
172 These are lightweight utilities which should only be called by parse_operands
173 and other parsers. GAS processes each assembly line by parsing it against
174 instruction template(s), in the case of multiple templates (for the same
175 mnemonic name), those templates are tried one by one until one succeeds or
176 all fail. An assembly line may fail a few templates before being
177 successfully parsed; an error saved here in most cases is not a user error
178 but an error indicating the current template is not the right template.
179 Therefore it is very important that errors can be saved at a low cost during
180 the parsing; we don't want to slow down the whole parsing by recording
181 non-user errors in detail.
182
183 Remember that the objective is to help GAS pick up the most appropriate
184 error message in the case of multiple templates, e.g. FMOV which has 8
185 templates. */
186
187 static inline void
188 clear_error (void)
189 {
190 inst.parsing_error.kind = AARCH64_OPDE_NIL;
191 inst.parsing_error.error = NULL;
192 }
193
194 static inline bool
195 error_p (void)
196 {
197 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
198 }
199
200 static inline const char *
201 get_error_message (void)
202 {
203 return inst.parsing_error.error;
204 }
205
206 static inline enum aarch64_operand_error_kind
207 get_error_kind (void)
208 {
209 return inst.parsing_error.kind;
210 }
211
212 static inline void
213 set_error (enum aarch64_operand_error_kind kind, const char *error)
214 {
215 inst.parsing_error.kind = kind;
216 inst.parsing_error.error = error;
217 }
218
219 static inline void
220 set_recoverable_error (const char *error)
221 {
222 set_error (AARCH64_OPDE_RECOVERABLE, error);
223 }
224
225 /* Use the DESC field of the corresponding aarch64_operand entry to compose
226 the error message. */
227 static inline void
228 set_default_error (void)
229 {
230 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
231 }
232
233 static inline void
234 set_syntax_error (const char *error)
235 {
236 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
237 }
238
239 static inline void
240 set_first_syntax_error (const char *error)
241 {
242 if (! error_p ())
243 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
244 }
245
246 static inline void
247 set_fatal_syntax_error (const char *error)
248 {
249 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
250 }
251 \f
252 /* Return value for certain parsers when the parsing fails; those parsers
253 return the information of the parsed result, e.g. register number, on
254 success. */
255 #define PARSE_FAIL -1
256
257 /* This is an invalid condition code that means no conditional field is
258 present. */
259 #define COND_ALWAYS 0x10
260
261 typedef struct
262 {
263 const char *template;
264 uint32_t value;
265 } asm_nzcv;
266
267 struct reloc_entry
268 {
269 char *name;
270 bfd_reloc_code_real_type reloc;
271 };
272
273 /* Macros to define the register types and masks for the purpose
274 of parsing. */
275
276 #undef AARCH64_REG_TYPES
277 #define AARCH64_REG_TYPES \
278 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
279 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
280 BASIC_REG_TYPE(SP_32) /* wsp */ \
281 BASIC_REG_TYPE(SP_64) /* sp */ \
282 BASIC_REG_TYPE(Z_32) /* wzr */ \
283 BASIC_REG_TYPE(Z_64) /* xzr */ \
284 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
285 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
286 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
287 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
288 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
289 BASIC_REG_TYPE(VN) /* v[0-31] */ \
290 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
291 BASIC_REG_TYPE(PN) /* p[0-15] */ \
292 BASIC_REG_TYPE(ZA) /* za[0-15] */ \
293 BASIC_REG_TYPE(ZAH) /* za[0-15]h */ \
294 BASIC_REG_TYPE(ZAV) /* za[0-15]v */ \
295 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
296 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
297 /* Typecheck: same, plus SVE registers. */ \
298 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
299 | REG_TYPE(ZN)) \
300 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
301 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
302 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
303 /* Typecheck: same, plus SVE registers. */ \
304 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
306 | REG_TYPE(ZN)) \
307 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
308 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
309 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
310 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
311 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
313 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
314 /* Typecheck: any [BHSDQ]P FP. */ \
315 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
316 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
317 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
318 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
320 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
321 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
322 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
323 be used for SVE instructions, since Zn and Pn are valid symbols \
324 in other contexts. */ \
325 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
326 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
327 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
328 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
329 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
330 | REG_TYPE(ZN) | REG_TYPE(PN)) \
331 /* Any integer register; used for error messages only. */ \
332 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
333 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
334 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
335 /* Pseudo type to mark the end of the enumerator sequence. */ \
336 BASIC_REG_TYPE(MAX)
337
338 #undef BASIC_REG_TYPE
339 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
340 #undef MULTI_REG_TYPE
341 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
342
343 /* Register type enumerators. */
344 typedef enum aarch64_reg_type_
345 {
346 /* A list of REG_TYPE_*. */
347 AARCH64_REG_TYPES
348 } aarch64_reg_type;
349
350 #undef BASIC_REG_TYPE
351 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
352 #undef REG_TYPE
353 #define REG_TYPE(T) (1 << REG_TYPE_##T)
354 #undef MULTI_REG_TYPE
355 #define MULTI_REG_TYPE(T,V) V,
356
357 /* Structure for a hash table entry for a register. */
358 typedef struct
359 {
360 const char *name;
361 unsigned char number;
362 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
363 unsigned char builtin;
364 } reg_entry;
365
366 /* Values indexed by aarch64_reg_type to assist the type checking. */
367 static const unsigned reg_type_masks[] =
368 {
369 AARCH64_REG_TYPES
370 };
371
372 #undef BASIC_REG_TYPE
373 #undef REG_TYPE
374 #undef MULTI_REG_TYPE
375 #undef AARCH64_REG_TYPES
376
377 /* Diagnostics used when we don't get a register of the expected type.
378 Note: this has to synchronized with aarch64_reg_type definitions
379 above. */
380 static const char *
381 get_reg_expected_msg (aarch64_reg_type reg_type)
382 {
383 const char *msg;
384
385 switch (reg_type)
386 {
387 case REG_TYPE_R_32:
388 msg = N_("integer 32-bit register expected");
389 break;
390 case REG_TYPE_R_64:
391 msg = N_("integer 64-bit register expected");
392 break;
393 case REG_TYPE_R_N:
394 msg = N_("integer register expected");
395 break;
396 case REG_TYPE_R64_SP:
397 msg = N_("64-bit integer or SP register expected");
398 break;
399 case REG_TYPE_SVE_BASE:
400 msg = N_("base register expected");
401 break;
402 case REG_TYPE_R_Z:
403 msg = N_("integer or zero register expected");
404 break;
405 case REG_TYPE_SVE_OFFSET:
406 msg = N_("offset register expected");
407 break;
408 case REG_TYPE_R_SP:
409 msg = N_("integer or SP register expected");
410 break;
411 case REG_TYPE_R_Z_SP:
412 msg = N_("integer, zero or SP register expected");
413 break;
414 case REG_TYPE_FP_B:
415 msg = N_("8-bit SIMD scalar register expected");
416 break;
417 case REG_TYPE_FP_H:
418 msg = N_("16-bit SIMD scalar or floating-point half precision "
419 "register expected");
420 break;
421 case REG_TYPE_FP_S:
422 msg = N_("32-bit SIMD scalar or floating-point single precision "
423 "register expected");
424 break;
425 case REG_TYPE_FP_D:
426 msg = N_("64-bit SIMD scalar or floating-point double precision "
427 "register expected");
428 break;
429 case REG_TYPE_FP_Q:
430 msg = N_("128-bit SIMD scalar or floating-point quad precision "
431 "register expected");
432 break;
433 case REG_TYPE_R_Z_BHSDQ_V:
434 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
435 msg = N_("register expected");
436 break;
437 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
438 msg = N_("SIMD scalar or floating-point register expected");
439 break;
440 case REG_TYPE_VN: /* any V reg */
441 msg = N_("vector register expected");
442 break;
443 case REG_TYPE_ZN:
444 msg = N_("SVE vector register expected");
445 break;
446 case REG_TYPE_PN:
447 msg = N_("SVE predicate register expected");
448 break;
449 default:
450 as_fatal (_("invalid register type %d"), reg_type);
451 }
452 return msg;
453 }
454
455 /* Some well known registers that we refer to directly elsewhere. */
456 #define REG_SP 31
457 #define REG_ZR 31
458
459 /* Instructions take 4 bytes in the object file. */
460 #define INSN_SIZE 4
461
462 static htab_t aarch64_ops_hsh;
463 static htab_t aarch64_cond_hsh;
464 static htab_t aarch64_shift_hsh;
465 static htab_t aarch64_sys_regs_hsh;
466 static htab_t aarch64_pstatefield_hsh;
467 static htab_t aarch64_sys_regs_ic_hsh;
468 static htab_t aarch64_sys_regs_dc_hsh;
469 static htab_t aarch64_sys_regs_at_hsh;
470 static htab_t aarch64_sys_regs_tlbi_hsh;
471 static htab_t aarch64_sys_regs_sr_hsh;
472 static htab_t aarch64_reg_hsh;
473 static htab_t aarch64_barrier_opt_hsh;
474 static htab_t aarch64_nzcv_hsh;
475 static htab_t aarch64_pldop_hsh;
476 static htab_t aarch64_hint_opt_hsh;
477
478 /* Stuff needed to resolve the label ambiguity
479 As:
480 ...
481 label: <insn>
482 may differ from:
483 ...
484 label:
485 <insn> */
486
487 static symbolS *last_label_seen;
488
489 /* Literal pool structure. Held on a per-section
490 and per-sub-section basis. */
491
492 #define MAX_LITERAL_POOL_SIZE 1024
493 typedef struct literal_expression
494 {
495 expressionS exp;
496 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
497 LITTLENUM_TYPE * bignum;
498 } literal_expression;
499
500 typedef struct literal_pool
501 {
502 literal_expression literals[MAX_LITERAL_POOL_SIZE];
503 unsigned int next_free_entry;
504 unsigned int id;
505 symbolS *symbol;
506 segT section;
507 subsegT sub_section;
508 int size;
509 struct literal_pool *next;
510 } literal_pool;
511
512 /* Pointer to a linked list of literal pools. */
513 static literal_pool *list_of_pools = NULL;
514 \f
515 /* Pure syntax. */
516
517 /* This array holds the chars that always start a comment. If the
518 pre-processor is disabled, these aren't very useful. */
519 const char comment_chars[] = "";
520
521 /* This array holds the chars that only start a comment at the beginning of
522 a line. If the line seems to have the form '# 123 filename'
523 .line and .file directives will appear in the pre-processed output. */
524 /* Note that input_file.c hand checks for '#' at the beginning of the
525 first line of the input file. This is because the compiler outputs
526 #NO_APP at the beginning of its output. */
527 /* Also note that comments like this one will always work. */
528 const char line_comment_chars[] = "#";
529
530 const char line_separator_chars[] = ";";
531
532 /* Chars that can be used to separate mant
533 from exp in floating point numbers. */
534 const char EXP_CHARS[] = "eE";
535
536 /* Chars that mean this number is a floating point constant. */
537 /* As in 0f12.456 */
538 /* or 0d1.2345e12 */
539
540 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
541
542 /* Prefix character that indicates the start of an immediate value. */
543 #define is_immediate_prefix(C) ((C) == '#')
544
545 /* Separator character handling. */
546
547 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
548
/* If *STR points at the character C, step over it and return TRUE;
   otherwise leave *STR untouched and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
560
561 #define skip_past_comma(str) skip_past_char (str, ',')
562
563 /* Arithmetic expressions (possibly involving symbols). */
564
565 static bool in_aarch64_get_expression = false;
566
567 /* Third argument to aarch64_get_expression. */
568 #define GE_NO_PREFIX false
569 #define GE_OPT_PREFIX true
570
571 /* Fourth argument to aarch64_get_expression. */
572 #define ALLOW_ABSENT false
573 #define REJECT_ABSENT true
574
575 /* Fifth argument to aarch64_get_expression. */
576 #define NORMAL_RESOLUTION false
577
/* Return TRUE if the string pointed to by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.
   If DEFER_RESOLUTION is true, then do not resolve expressions against
   constant symbols.  Necessary if the expression is part of a fixup
   that uses a reloc that must be emitted.  */

static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent,
			bool defer_resolution)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression machinery reads from input_line_pointer, so
     temporarily point it at *STR and restore it before returning.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_aarch64_get_expression = true;
  if (defer_resolution)
    seg = deferred_expression (ep);
  else
    seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#' prefix promises an immediate, so a bad expression after
	 it is fatal; otherwise keep any earlier, more precise error.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
651
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* AArch64 uses the standard IEEE formats, so defer to the generic
     helper, honouring the target endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
662
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * exp)
{
  /* Only flag the failure when called back from aarch64_get_expression;
     that caller then reports the error against the current insn.  */
  if (in_aarch64_get_expression)
    exp->X_op = O_illegal;
}
671
672 /* Immediate values. */
673
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful.  Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
685
686 /* Similar to first_error, but this function accepts formatted error
687 message. */
688 static void
689 first_error_fmt (const char *format, ...)
690 {
691 va_list args;
692 enum
693 { size = 100 };
694 /* N.B. this single buffer will not cause error messages for different
695 instructions to pollute each other; this is because at the end of
696 processing of each assembly line, error message if any will be
697 collected by as_bad. */
698 static char buffer[size];
699
700 if (! error_p ())
701 {
702 int ret ATTRIBUTE_UNUSED;
703 va_start (args, format);
704 ret = vsnprintf (buffer, size, format, args);
705 know (ret <= size - 1 && ret >= 0);
706 va_end (args);
707 set_syntax_error (buffer);
708 }
709 }
710
711 /* Register parsing. */
712
713 /* Generic register parser which is called by other specialized
714 register parsers.
715 CCP points to what should be the beginning of a register name.
716 If it is indeed a valid register name, advance CCP over it and
717 return the reg_entry structure; otherwise return NULL.
718 It does not issue diagnostics. */
719
720 static reg_entry *
721 parse_reg (char **ccp)
722 {
723 char *start = *ccp;
724 char *p;
725 reg_entry *reg;
726
727 #ifdef REGISTER_PREFIX
728 if (*start != REGISTER_PREFIX)
729 return NULL;
730 start++;
731 #endif
732
733 p = start;
734 if (!ISALPHA (*p) || !is_name_beginner (*p))
735 return NULL;
736
737 do
738 p++;
739 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
740
741 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
742
743 if (!reg)
744 return NULL;
745
746 *ccp = p;
747 return reg;
748 }
749
/* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
   return FALSE.  */
static bool
aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
{
  /* reg_type_masks[TYPE] is the bitmask of basic register types that
     TYPE accepts; test REG's own (basic) type bit against it.  */
  return (reg_type_masks[type] & (1 << reg->type)) != 0;
}
757
758 /* Try to parse a base or offset register. Allow SVE base and offset
759 registers if REG_TYPE includes SVE registers. Return the register
760 entry on success, setting *QUALIFIER to the register qualifier.
761 Return null otherwise.
762
763 Note that this function does not issue any diagnostics. */
764
765 static const reg_entry *
766 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
767 aarch64_opnd_qualifier_t *qualifier)
768 {
769 char *str = *ccp;
770 const reg_entry *reg = parse_reg (&str);
771
772 if (reg == NULL)
773 return NULL;
774
775 switch (reg->type)
776 {
777 case REG_TYPE_R_32:
778 case REG_TYPE_SP_32:
779 case REG_TYPE_Z_32:
780 *qualifier = AARCH64_OPND_QLF_W;
781 break;
782
783 case REG_TYPE_R_64:
784 case REG_TYPE_SP_64:
785 case REG_TYPE_Z_64:
786 *qualifier = AARCH64_OPND_QLF_X;
787 break;
788
789 case REG_TYPE_ZN:
790 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
791 || str[0] != '.')
792 return NULL;
793 switch (TOLOWER (str[1]))
794 {
795 case 's':
796 *qualifier = AARCH64_OPND_QLF_S_S;
797 break;
798 case 'd':
799 *qualifier = AARCH64_OPND_QLF_S_D;
800 break;
801 default:
802 return NULL;
803 }
804 str += 2;
805 break;
806
807 default:
808 return NULL;
809 }
810
811 *ccp = str;
812
813 return reg;
814 }
815
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept any 32/64-bit integer register, including [WX]ZR and {W}SP,
     but not SVE registers.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
827
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector and predicate registers never take an explicit element
     count; width 0 means "no count given".  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only valid for SVE registers or as a single 128-bit
	 element ("1q").  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* A non-zero element count must describe a full 64-bit or 128-bit
     vector, with the additional permitted short forms 2h and 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return true;
}
913
914 /* *STR contains an SVE zero/merge predication suffix. Parse it into
915 *PARSED_TYPE and point *STR at the end of the suffix. */
916
917 static bool
918 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
919 {
920 char *ptr = *str;
921
922 /* Skip '/'. */
923 gas_assert (*ptr == '/');
924 ptr++;
925 switch (TOLOWER (*ptr))
926 {
927 case 'z':
928 parsed_type->type = NT_zero;
929 break;
930 case 'm':
931 parsed_type->type = NT_merge;
932 break;
933 default:
934 if (*ptr != '\0' && *ptr != ',')
935 first_error_fmt (_("unexpected character `%c' in predication type"),
936 *ptr);
937 else
938 first_error (_("missing predication type"));
939 return false;
940 }
941 parsed_type->width = 0;
942 *str = ptr + 1;
943 return true;
944 }
945
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed to by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bool in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start from an "untyped, unindexed" description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the basic type that was actually parsed.  */
  type = reg->type;

  /* A '.' introduces a vector shape suffix; '/' after a predicate
     register introduces a zero/merge predication suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
			      NORMAL_RESOLUTION);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1082
1083 /* Parse register.
1084
1085 Return the register number on success; return PARSE_FAIL otherwise.
1086
1087 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1088 the register (e.g. NEON double or quad reg when either has been requested).
1089
1090 If this is a NEON vector register with additional type information, fill
1091 in the struct pointed to by VECTYPE (if non-NULL).
1092
1093 This parser does not handle register list. */
1094
1095 static int
1096 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1097 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1098 {
1099 struct vector_type_el atype;
1100 char *str = *ccp;
1101 int reg = parse_typed_reg (&str, type, rtype, &atype,
1102 /*in_reg_list= */ false);
1103
1104 if (reg == PARSE_FAIL)
1105 return PARSE_FAIL;
1106
1107 if (vectype)
1108 *vectype = atype;
1109
1110 *ccp = str;
1111
1112 return reg;
1113 }
1114
1115 static inline bool
1116 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1117 {
1118 return
1119 e1.type == e2.type
1120 && e1.defined == e2.defined
1121 && e1.width == e2.width && e1.index == e2.index;
1122 }
1123
1124 /* This function parses a list of vector registers of type TYPE.
1125 On success, it returns the parsed register list information in the
1126 following encoded format:
1127
1128 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1129 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1130
1131 The information of the register shape and/or index is returned in
1132 *VECTYPE.
1133
1134 It returns PARSE_FAIL if the register list is invalid.
1135
1136 The list contains one to four registers.
1137 Each register can be one of:
1138 <Vt>.<T>[<index>]
1139 <Vt>.<T>
1140 All <T> should be identical.
1141 All <index> should be identical.
1142 There are restrictions on <Vt> numbers which are checked later
1143 (by reg_list_valid_p). */
1144
1145 static int
1146 parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1147 struct vector_type_el *vectype)
1148 {
1149 char *str = *ccp;
1150 int nb_regs;
1151 struct vector_type_el typeinfo, typeinfo_first;
1152 int val, val_range;
1153 int in_range;
1154 int ret_val;
1155 int i;
1156 bool error = false;
1157 bool expect_index = false;
1158
1159 if (*str != '{')
1160 {
1161 set_syntax_error (_("expecting {"));
1162 return PARSE_FAIL;
1163 }
1164 str++;
1165
1166 nb_regs = 0;
1167 typeinfo_first.defined = 0;
1168 typeinfo_first.type = NT_invtype;
1169 typeinfo_first.width = -1;
1170 typeinfo_first.index = 0;
1171 ret_val = 0;
1172 val = -1;
1173 val_range = -1;
1174 in_range = 0;
1175 do
1176 {
1177 if (in_range)
1178 {
1179 str++; /* skip over '-' */
1180 val_range = val;
1181 }
1182 val = parse_typed_reg (&str, type, NULL, &typeinfo,
1183 /*in_reg_list= */ true);
1184 if (val == PARSE_FAIL)
1185 {
1186 set_first_syntax_error (_("invalid vector register in list"));
1187 error = true;
1188 continue;
1189 }
1190 /* reject [bhsd]n */
1191 if (type == REG_TYPE_VN && typeinfo.defined == 0)
1192 {
1193 set_first_syntax_error (_("invalid scalar register in list"));
1194 error = true;
1195 continue;
1196 }
1197
1198 if (typeinfo.defined & NTA_HASINDEX)
1199 expect_index = true;
1200
1201 if (in_range)
1202 {
1203 if (val < val_range)
1204 {
1205 set_first_syntax_error
1206 (_("invalid range in vector register list"));
1207 error = true;
1208 }
1209 val_range++;
1210 }
1211 else
1212 {
1213 val_range = val;
1214 if (nb_regs == 0)
1215 typeinfo_first = typeinfo;
1216 else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1217 {
1218 set_first_syntax_error
1219 (_("type mismatch in vector register list"));
1220 error = true;
1221 }
1222 }
1223 if (! error)
1224 for (i = val_range; i <= val; i++)
1225 {
1226 ret_val |= i << (5 * nb_regs);
1227 nb_regs++;
1228 }
1229 in_range = 0;
1230 }
1231 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1232
1233 skip_whitespace (str);
1234 if (*str != '}')
1235 {
1236 set_first_syntax_error (_("end of vector register list not found"));
1237 error = true;
1238 }
1239 str++;
1240
1241 skip_whitespace (str);
1242
1243 if (expect_index)
1244 {
1245 if (skip_past_char (&str, '['))
1246 {
1247 expressionS exp;
1248
1249 aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
1250 NORMAL_RESOLUTION);
1251 if (exp.X_op != O_constant)
1252 {
1253 set_first_syntax_error (_("constant expression required."));
1254 error = true;
1255 }
1256 if (! skip_past_char (&str, ']'))
1257 error = true;
1258 else
1259 typeinfo_first.index = exp.X_add_number;
1260 }
1261 else
1262 {
1263 set_first_syntax_error (_("expected index"));
1264 error = true;
1265 }
1266 }
1267
1268 if (nb_regs > 4)
1269 {
1270 set_first_syntax_error (_("too many registers in vector register list"));
1271 error = true;
1272 }
1273 else if (nb_regs == 0)
1274 {
1275 set_first_syntax_error (_("empty vector register list"));
1276 error = true;
1277 }
1278
1279 *ccp = str;
1280 if (! error)
1281 *vectype = typeinfo_first;
1282
1283 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1284 }
1285
1286 /* Directives: register aliases. */
1287
1288 static reg_entry *
1289 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1290 {
1291 reg_entry *new;
1292 const char *name;
1293
1294 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1295 {
1296 if (new->builtin)
1297 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1298 str);
1299
1300 /* Only warn about a redefinition if it's not defined as the
1301 same register. */
1302 else if (new->number != number || new->type != type)
1303 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1304
1305 return NULL;
1306 }
1307
1308 name = xstrdup (str);
1309 new = XNEW (reg_entry);
1310
1311 new->name = name;
1312 new->number = number;
1313 new->type = type;
1314 new->builtin = false;
1315
1316 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1317
1318 return new;
1319 }
1320
1321 /* Look for the .req directive. This is of the form:
1322
1323 new_register_name .req existing_register_name
1324
1325 If we find one, or if it looks sufficiently like one that we want to
1326 handle any error here, return TRUE. Otherwise return FALSE. */
1327
static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;			/* Skip over " .req " (6 characters).  */
  if (*oldname == '\0')
    return false;

  /* The right-hand side must name an already-known register or alias.  */
  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only attempt the uppercase alias if it differs from the name as
	 written (otherwise it was just created above).  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	     foo .req r0
	     Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise only add the lowercase alias if it is distinct.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1400
1401 /* Should never be called, as .req goes between the alias and the
1402 register name, not at the beginning of the line. */
/* Handler for a line that starts with ".req"; always an error, because a
   valid .req is written "alias .req reg" and is caught by
   create_register_alias before directives are dispatched.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1408
1409 /* The .unreq directive deletes an alias which was previously defined
1410 by .req. For example:
1411
1412 my_alias .req r11
1413 .unreq my_alias */
1414
1415 static void
1416 s_unreq (int a ATTRIBUTE_UNUSED)
1417 {
1418 char *name;
1419 char saved_char;
1420
1421 name = input_line_pointer;
1422
1423 while (*input_line_pointer != 0
1424 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1425 ++input_line_pointer;
1426
1427 saved_char = *input_line_pointer;
1428 *input_line_pointer = 0;
1429
1430 if (!*name)
1431 as_bad (_("invalid syntax for .unreq directive"));
1432 else
1433 {
1434 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1435
1436 if (!reg)
1437 as_bad (_("unknown register alias '%s'"), name);
1438 else if (reg->builtin)
1439 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1440 name);
1441 else
1442 {
1443 char *p;
1444 char *nbuf;
1445
1446 str_hash_delete (aarch64_reg_hsh, name);
1447 free ((char *) reg->name);
1448 free (reg);
1449
1450 /* Also locate the all upper case and all lower case versions.
1451 Do not complain if we cannot find one or the other as it
1452 was probably deleted above. */
1453
1454 nbuf = strdup (name);
1455 for (p = nbuf; *p; p++)
1456 *p = TOUPPER (*p);
1457 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1458 if (reg)
1459 {
1460 str_hash_delete (aarch64_reg_hsh, nbuf);
1461 free ((char *) reg->name);
1462 free (reg);
1463 }
1464
1465 for (p = nbuf; *p; p++)
1466 *p = TOLOWER (*p);
1467 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1468 if (reg)
1469 {
1470 str_hash_delete (aarch64_reg_hsh, nbuf);
1471 free ((char *) reg->name);
1472 free (reg);
1473 }
1474
1475 free (nbuf);
1476 }
1477 }
1478
1479 *input_line_pointer = saved_char;
1480 demand_empty_rest_of_line ();
1481 }
1482
1483 /* Directives: Instruction set selection. */
1484
1485 #ifdef OBJ_ELF
1486 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1487 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1489 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1490
1491 /* Create a new mapping symbol for the transition to STATE. */
1492
static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Choose the AAELF64 mapping symbol name: $d for data, $x for A64 code.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Remove the superseded symbol from the global symbol chain.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within one frag are created in address order.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1546
1547 /* We must sometimes convert a region marked as code to data during
1548 code alignment, if an odd number of bytes have to be padded. The
1549 code mapping symbol is pushed to an aligned address. */
1550
1551 static void
1552 insert_data_mapping_symbol (enum mstate state,
1553 valueT value, fragS * frag, offsetT bytes)
1554 {
1555 /* If there was already a mapping symbol, remove it. */
1556 if (frag->tc_frag_data.last_map != NULL
1557 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1558 frag->fr_address + value)
1559 {
1560 symbolS *symp = frag->tc_frag_data.last_map;
1561
1562 if (value == 0)
1563 {
1564 know (frag->tc_frag_data.first_map == symp);
1565 frag->tc_frag_data.first_map = NULL;
1566 }
1567 frag->tc_frag_data.last_map = NULL;
1568 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1569 }
1570
1571 make_mapping_symbol (MAP_DATA, value, frag);
1572 make_mapping_symbol (state, value + bytes, frag);
1573 }
1574
1575 static void mapping_state_2 (enum mstate state, int max_chars);
1576
1577 /* Set the mapping state to STATE. Only call this when about to
1578 emit some STATE bytes to the file. */
1579
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* Special-case the very first transition in a section.  */
#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Emit the mapping symbol for STATE at the current location.  */
  mapping_state_2 (state, 0);
}
1618
1619 /* Same as mapping_state, but MAX_CHARS bytes have already been
1620 allocated. Put the mapping symbol that far back. */
1621
1622 static void
1623 mapping_state_2 (enum mstate state, int max_chars)
1624 {
1625 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1626
1627 if (!SEG_NORMAL (now_seg))
1628 return;
1629
1630 if (mapstate == state)
1631 /* The mapping symbol has already been emitted.
1632 There is nothing else to do. */
1633 return;
1634
1635 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1636 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1637 }
1638 #else
1639 #define mapping_state(x) /* nothing */
1640 #define mapping_state_2(x, y) /* nothing */
1641 #endif
1642
1643 /* Directives: sectioning and alignment. */
1644
/* Implement the ".bss" directive: switch output to the BSS section.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1654
/* Implement the ".even" directive: align to a 2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* Record 2^1 alignment so the section ends up suitably aligned.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1666
1667 /* Directives: Literal pools. */
1668
1669 static literal_pool *
1670 find_literal_pool (int size)
1671 {
1672 literal_pool *pool;
1673
1674 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1675 {
1676 if (pool->section == now_seg
1677 && pool->sub_section == now_subseg && pool->size == size)
1678 break;
1679 }
1680
1681 return pool;
1682 }
1683
1684 static literal_pool *
1685 find_or_make_literal_pool (int size)
1686 {
1687 /* Next literal pool ID number. */
1688 static unsigned int latest_pool_num = 1;
1689 literal_pool *pool;
1690
1691 pool = find_literal_pool (size);
1692
1693 if (pool == NULL)
1694 {
1695 /* Create a new pool. */
1696 pool = XNEW (literal_pool);
1697 if (!pool)
1698 return NULL;
1699
1700 /* Currently we always put the literal pool in the current text
1701 section. If we were generating "small" model code where we
1702 knew that all code and initialised data was within 1MB then
1703 we could output literals to mergeable, read-only data
1704 sections. */
1705
1706 pool->next_free_entry = 0;
1707 pool->section = now_seg;
1708 pool->sub_section = now_subseg;
1709 pool->size = size;
1710 pool->next = list_of_pools;
1711 pool->symbol = NULL;
1712
1713 /* Add it to the list. */
1714 list_of_pools = pool;
1715 }
1716
1717 /* New pools, and emptied pools, will have a NULL symbol. */
1718 if (pool->symbol == NULL)
1719 {
1720 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1721 &zero_address_frag, 0);
1722 pool->id = latest_pool_num++;
1723 }
1724
1725 /* Done. */
1726 return pool;
1727 }
1728
1729 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1730 Return TRUE on success, otherwise return FALSE. */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  Constants are
     matched by value/signedness, symbolic expressions by symbol and
     addend, so identical literals share one slot.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP in place to reference the pool entry: the pool's fake
     symbol plus this entry's byte offset within the pool.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1788
1789 /* Can't use symbol_new here, so have to create a symbol and then at
1790 a later date assign it a value. That's what these functions do. */
1791
static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy the name into the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Too late to add symbols once the table has been frozen.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  /* Give the object format and the target a chance to note the symbol.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1839
1840
/* Implement ".ltorg"/".pool": dump all pending literal pools for the
   current (sub)section at the current location.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* There is one pool per literal size: 4 and 8 bytes (and 16 below,
     align 2..4 covers all three).  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align the pool to its element size for word-sized accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* \002 keeps the generated name out of the user namespace.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Bind the pool's fake symbol to this spot so earlier literal
	 references resolve here.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1899
1900 #ifdef OBJ_ELF
1901 /* Forward declarations for functions below, in the MD interface
1902 section. */
1903 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1904 static struct reloc_table_entry * find_reloc_table_entry (char **);
1905
1906 /* Directives: Data. */
1907 /* N.B. the support for relocation suffix in this directive needs to be
1908 implemented properly. */
1909
static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* An empty directive emits nothing.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbolic value may carry a "#:reloc:" suffix; relocation
	     suffixes are recognized here but not yet implemented (see the
	     N.B. comment above this function).  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1961
1962 /* Mark symbol that it follows a variant PCS convention. */
1963
1964 static void
1965 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
1966 {
1967 char *name;
1968 char c;
1969 symbolS *sym;
1970 asymbol *bfdsym;
1971 elf_symbol_type *elfsym;
1972
1973 c = get_symbol_name (&name);
1974 if (!*name)
1975 as_bad (_("Missing symbol name in directive"));
1976 sym = symbol_find_or_make (name);
1977 restore_line_pointer (c);
1978 demand_empty_rest_of_line ();
1979 bfdsym = symbol_get_bfdsym (sym);
1980 elfsym = elf_symbol_from (bfdsym);
1981 gas_assert (elfsym);
1982 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
1983 }
1984 #endif /* OBJ_ELF */
1985
1986 /* Output a 32-bit word, but mark as an instruction. */
1987
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* An empty ".inst" emits nothing.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instruction encodings are always little-endian, even when
	 assembling for big-endian data; swap so emit_expr stores the
	 word in instruction byte order.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2038
/* Implement ".cfi_b_key_frame": mark the current CFI FDE as using the
   B pointer-authentication key for return-address signing.  */
static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2046
2047 #ifdef OBJ_ELF
2048 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2049
static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the instruction that follows this
     directive; frag_more (0) gives the current offset in the frag.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2062
2063 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2064
static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2082
2083 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2084
static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* As with .tlsdescadd: reserve room so the fix covers the LDR that
     must immediately follow this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2097 #endif /* OBJ_ELF */
2098
2099 static void s_aarch64_arch (int);
2100 static void s_aarch64_cpu (int);
2101 static void s_aarch64_arch_extension (int);
2102
2103 /* This table describes all the machine specific pseudo-ops the assembler
2104 has to support. The fields are:
2105 pseudo-op name without dot
2106 function to call to execute this pseudo-op
2107 Integer arg to pass to the function. */
2108
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* Literal pool dumping; ".pool" is an alias for ".ltorg".  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Target architecture selection.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* Data directives with (future) relocation-suffix support; the third
     field is the size in bytes: ".word"/".long" are 4, ".xword"/".dword"
     are 8 on AArch64.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* The third field selects the float_cons format: 'h' = IEEE half,
     'b' = bfloat16.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}			/* Sentinel.  */
};
2136 \f
2137
2138 /* Check whether STR points to a register name followed by a comma or the
2139 end of line; REG_TYPE indicates which register types are checked
2140 against. Return TRUE if STR is such a register name; otherwise return
2141 FALSE. The function does not intend to produce any diagnostics, but since
2142 the register parser aarch64_reg_parse, which is called by this function,
2143 does produce diagnostics, we call clear_error to clear any diagnostics
2144 that may be generated by aarch64_reg_parse.
2145 Also, the function returns FALSE directly if there is any user error
2146 present at the function entry. This prevents the existing diagnostics
2147 state from being spoiled.
2148 The function currently serves parse_constant_immediate and
2149 parse_big_immediate only. */
2150 static bool
2151 reg_name_p (char *str, aarch64_reg_type reg_type)
2152 {
2153 int reg;
2154
2155 /* Prevent the diagnostics state from being spoiled. */
2156 if (error_p ())
2157 return false;
2158
2159 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2160
2161 /* Clear the parsing error that may be set by the reg parser. */
2162 clear_error ();
2163
2164 if (reg == PARSE_FAIL)
2165 return false;
2166
2167 skip_whitespace (str);
2168 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2169 return true;
2170
2171 return false;
2172 }
2173
2174 /* Parser functions used exclusively in instruction operands. */
2175
2176 /* Parse an immediate expression which may not be constant.
2177
2178 To prevent the expression parser from pushing a register name
2179 into the symbol table as an undefined symbol, firstly a check is
2180 done to find out whether STR is a register of type REG_TYPE followed
2181 by a comma or the end of line. Return FALSE if STR is such a string. */
2182
2183 static bool
2184 parse_immediate_expression (char **str, expressionS *exp,
2185 aarch64_reg_type reg_type)
2186 {
2187 if (reg_name_p (*str, reg_type))
2188 {
2189 set_recoverable_error (_("immediate operand required"));
2190 return false;
2191 }
2192
2193 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT,
2194 NORMAL_RESOLUTION);
2195
2196 if (exp->X_op == O_absent)
2197 {
2198 set_fatal_syntax_error (_("missing immediate expression"));
2199 return false;
2200 }
2201
2202 return true;
2203 }
2204
2205 /* Constant immediate-value read function for use in insn parsing.
2206 STR points to the beginning of the immediate (with the optional
2207 leading #); *VAL receives the value. REG_TYPE says which register
2208 names should be treated as registers rather than as symbolic immediates.
2209
2210 Return TRUE on success; otherwise return FALSE. */
2211
2212 static bool
2213 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2214 {
2215 expressionS exp;
2216
2217 if (! parse_immediate_expression (str, &exp, reg_type))
2218 return false;
2219
2220 if (exp.X_op != O_constant)
2221 {
2222 set_syntax_error (_("constant expression required"));
2223 return false;
2224 }
2225
2226 *val = exp.X_add_number;
2227 return true;
2228 }
2229
2230 static uint32_t
2231 encode_imm_float_bits (uint32_t imm)
2232 {
2233 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2234 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2235 }
2236
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
      (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* Such a value has the single-precision bit pattern:

	3 32222222 2221111111111
	1 09876543 21098765432109876543210
	n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~e.  Derive the expected 'Eeeeee' field from bit 30.  */
  uint32_t expected = ((imm >> 30) & 0x1) == 0 ? 0x3e000000 : 0x40000000;

  /* The 19 trailing fraction bits must be zero, and bits 25-30 must
     match the 'Eeeeee' pattern computed above.  */
  return (imm & 0x7ffff) == 0 && (imm & 0x7e000000) == expected;
}
2269
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so; *FPWORD is untouched on failure.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* A convertible double has the bit pattern:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is
     the inverse of E.  */

  uint32_t top = imm >> 32;
  uint32_t bottom = imm;
  uint32_t expected;

  /* The 29 trailing significand bits must be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* Bits 59-62 ('E~~~') must be E followed by three copies of ~E.  */
  expected = ((top >> 30) & 0x1) == 0 ? 0x38000000 : 0x40000000;
  if ((top & 0x78000000) != expected)
    return false;

  /* Reject Eeee_eeee == 1111_1111: the exponent would not fit a float.  */
  if ((top & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (top & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	    | ((top << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	    | (bottom >> 29);		/* 3 S bits.  */
  return true;
}
2317
2318 /* Return true if we should treat OPERAND as a double-precision
2319 floating-point operand rather than a single-precision one. */
2320 static bool
2321 double_precision_operand_p (const aarch64_opnd_info *operand)
2322 {
2323 /* Check for unsuffixed SVE registers, which are allowed
2324 for LDR and STR but not in instructions that require an
2325 immediate. We get better error messages if we arbitrarily
2326 pick one size, parse the immediate normally, and then
2327 report the match failure in the normal way. */
2328 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2329 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2330 }
2331
2332 /* Parse a floating-point immediate. Return TRUE on success and return the
2333 value in *IMMED in the format of IEEE754 single-precision encoding.
2334 *CCP points to the start of the string; DP_P is TRUE when the immediate
2335 is expected to be in double-precision (N.B. this only matters when
2336 hexadecimal representation is involved). REG_TYPE says which register
2337 names should be treated as registers rather than as symbolic immediates.
2338
2339 This routine accepts any IEEE float; it is up to the callers to reject
2340 invalid ones. */
2341
2342 static bool
2343 parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
2344 aarch64_reg_type reg_type)
2345 {
2346 char *str = *ccp;
2347 char *fpnum;
2348 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2349 int64_t val = 0;
2350 unsigned fpword = 0;
2351 bool hex_p = false;
2352
2353 skip_past_char (&str, '#');
2354
2355 fpnum = str;
2356 skip_whitespace (fpnum);
2357
2358 if (startswith (fpnum, "0x"))
2359 {
2360 /* Support the hexadecimal representation of the IEEE754 encoding.
2361 Double-precision is expected when DP_P is TRUE, otherwise the
2362 representation should be in single-precision. */
2363 if (! parse_constant_immediate (&str, &val, reg_type))
2364 goto invalid_fp;
2365
2366 if (dp_p)
2367 {
2368 if (!can_convert_double_to_float (val, &fpword))
2369 goto invalid_fp;
2370 }
2371 else if ((uint64_t) val > 0xffffffff)
2372 goto invalid_fp;
2373 else
2374 fpword = val;
2375
2376 hex_p = true;
2377 }
2378 else if (reg_name_p (str, reg_type))
2379 {
2380 set_recoverable_error (_("immediate operand required"));
2381 return false;
2382 }
2383
2384 if (! hex_p)
2385 {
2386 int i;
2387
2388 if ((str = atof_ieee (str, 's', words)) == NULL)
2389 goto invalid_fp;
2390
2391 /* Our FP word must be 32 bits (single-precision FP). */
2392 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2393 {
2394 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2395 fpword |= words[i];
2396 }
2397 }
2398
2399 *immed = fpword;
2400 *ccp = str;
2401 return true;
2402
2403 invalid_fp:
2404 set_fatal_syntax_error (_("invalid floating-point constant"));
2405 return false;
2406 }
2407
2408 /* Less-generic immediate-value read function with the possibility of loading
2409 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2410 instructions.
2411
2412 To prevent the expression parser from pushing a register name into the
2413 symbol table as an undefined symbol, a check is firstly done to find
2414 out whether STR is a register of type REG_TYPE followed by a comma or
2415 the end of line. Return FALSE if STR is such a register. */
2416
2417 static bool
2418 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2419 {
2420 char *ptr = *str;
2421
2422 if (reg_name_p (ptr, reg_type))
2423 {
2424 set_syntax_error (_("immediate operand required"));
2425 return false;
2426 }
2427
2428 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT,
2429 NORMAL_RESOLUTION);
2430
2431 if (inst.reloc.exp.X_op == O_constant)
2432 *imm = inst.reloc.exp.X_add_number;
2433
2434 *str = ptr;
2435
2436 return true;
2437 }
2438
2439 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2440 if NEED_LIBOPCODES is non-zero, the fixup will need
2441 assistance from the libopcodes. */
2442
2443 static inline void
2444 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2445 const aarch64_opnd_info *operand,
2446 int need_libopcodes_p)
2447 {
2448 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2449 reloc->opnd = operand->type;
2450 if (need_libopcodes_p)
2451 reloc->need_libopcodes_p = 1;
2452 };
2453
2454 /* Return TRUE if the instruction needs to be fixed up later internally by
2455 the GAS; otherwise return FALSE. */
2456
2457 static inline bool
2458 aarch64_gas_internal_fixup_p (void)
2459 {
2460 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2461 }
2462
2463 /* Assign the immediate value to the relevant field in *OPERAND if
2464 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2465 needs an internal fixup in a later stage.
2466 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2467 IMM.VALUE that may get assigned with the constant. */
2468 static inline void
2469 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2470 aarch64_opnd_info *operand,
2471 int addr_off_p,
2472 int need_libopcodes_p,
2473 int skip_p)
2474 {
2475 if (reloc->exp.X_op == O_constant)
2476 {
2477 if (addr_off_p)
2478 operand->addr.offset.imm = reloc->exp.X_add_number;
2479 else
2480 operand->imm.value = reloc->exp.X_add_number;
2481 reloc->type = BFD_RELOC_UNUSED;
2482 }
2483 else
2484 {
2485 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2486 /* Tell libopcodes to ignore this operand or not. This is helpful
2487 when one of the operands needs to be fixed up later but we need
2488 libopcodes to check the other operands. */
2489 operand->skip = skip_p;
2490 }
2491 }
2492
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  /* Modifier name as written in assembly, without the colons.  */
  const char *name;
  /* Non-zero if the resulting relocation is PC-relative.  */
  int pc_rel;
  /* BFD relocation code to use, selected by the class of instruction
     that consumes the modified symbol; 0 means the modifier is not
     valid for that instruction class.  */
  bfd_reloc_code_real_type adr_type;		/* ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* ADRP.  */
  bfd_reloc_code_real_type movw_type;		/* MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;		/* Load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* Load literal.  */
};
2512
2513 static struct reloc_table_entry reloc_table[] =
2514 {
2515 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2516 {"lo12", 0,
2517 0, /* adr_type */
2518 0,
2519 0,
2520 BFD_RELOC_AARCH64_ADD_LO12,
2521 BFD_RELOC_AARCH64_LDST_LO12,
2522 0},
2523
2524 /* Higher 21 bits of pc-relative page offset: ADRP */
2525 {"pg_hi21", 1,
2526 0, /* adr_type */
2527 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2528 0,
2529 0,
2530 0,
2531 0},
2532
2533 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2534 {"pg_hi21_nc", 1,
2535 0, /* adr_type */
2536 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2537 0,
2538 0,
2539 0,
2540 0},
2541
2542 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2543 {"abs_g0", 0,
2544 0, /* adr_type */
2545 0,
2546 BFD_RELOC_AARCH64_MOVW_G0,
2547 0,
2548 0,
2549 0},
2550
2551 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2552 {"abs_g0_s", 0,
2553 0, /* adr_type */
2554 0,
2555 BFD_RELOC_AARCH64_MOVW_G0_S,
2556 0,
2557 0,
2558 0},
2559
2560 /* Less significant bits 0-15 of address/value: MOVK, no check */
2561 {"abs_g0_nc", 0,
2562 0, /* adr_type */
2563 0,
2564 BFD_RELOC_AARCH64_MOVW_G0_NC,
2565 0,
2566 0,
2567 0},
2568
2569 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2570 {"abs_g1", 0,
2571 0, /* adr_type */
2572 0,
2573 BFD_RELOC_AARCH64_MOVW_G1,
2574 0,
2575 0,
2576 0},
2577
2578 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2579 {"abs_g1_s", 0,
2580 0, /* adr_type */
2581 0,
2582 BFD_RELOC_AARCH64_MOVW_G1_S,
2583 0,
2584 0,
2585 0},
2586
2587 /* Less significant bits 16-31 of address/value: MOVK, no check */
2588 {"abs_g1_nc", 0,
2589 0, /* adr_type */
2590 0,
2591 BFD_RELOC_AARCH64_MOVW_G1_NC,
2592 0,
2593 0,
2594 0},
2595
2596 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2597 {"abs_g2", 0,
2598 0, /* adr_type */
2599 0,
2600 BFD_RELOC_AARCH64_MOVW_G2,
2601 0,
2602 0,
2603 0},
2604
2605 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2606 {"abs_g2_s", 0,
2607 0, /* adr_type */
2608 0,
2609 BFD_RELOC_AARCH64_MOVW_G2_S,
2610 0,
2611 0,
2612 0},
2613
2614 /* Less significant bits 32-47 of address/value: MOVK, no check */
2615 {"abs_g2_nc", 0,
2616 0, /* adr_type */
2617 0,
2618 BFD_RELOC_AARCH64_MOVW_G2_NC,
2619 0,
2620 0,
2621 0},
2622
2623 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2624 {"abs_g3", 0,
2625 0, /* adr_type */
2626 0,
2627 BFD_RELOC_AARCH64_MOVW_G3,
2628 0,
2629 0,
2630 0},
2631
2632 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2633 {"prel_g0", 1,
2634 0, /* adr_type */
2635 0,
2636 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2637 0,
2638 0,
2639 0},
2640
2641 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2642 {"prel_g0_nc", 1,
2643 0, /* adr_type */
2644 0,
2645 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2646 0,
2647 0,
2648 0},
2649
2650 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2651 {"prel_g1", 1,
2652 0, /* adr_type */
2653 0,
2654 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2655 0,
2656 0,
2657 0},
2658
2659 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2660 {"prel_g1_nc", 1,
2661 0, /* adr_type */
2662 0,
2663 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2664 0,
2665 0,
2666 0},
2667
2668 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2669 {"prel_g2", 1,
2670 0, /* adr_type */
2671 0,
2672 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2673 0,
2674 0,
2675 0},
2676
2677 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2678 {"prel_g2_nc", 1,
2679 0, /* adr_type */
2680 0,
2681 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2682 0,
2683 0,
2684 0},
2685
2686 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2687 {"prel_g3", 1,
2688 0, /* adr_type */
2689 0,
2690 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2691 0,
2692 0,
2693 0},
2694
2695 /* Get to the page containing GOT entry for a symbol. */
2696 {"got", 1,
2697 0, /* adr_type */
2698 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2699 0,
2700 0,
2701 0,
2702 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2703
2704 /* 12 bit offset into the page containing GOT entry for that symbol. */
2705 {"got_lo12", 0,
2706 0, /* adr_type */
2707 0,
2708 0,
2709 0,
2710 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2711 0},
2712
2713 /* 0-15 bits of address/value: MOVk, no check. */
2714 {"gotoff_g0_nc", 0,
2715 0, /* adr_type */
2716 0,
2717 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2718 0,
2719 0,
2720 0},
2721
2722 /* Most significant bits 16-31 of address/value: MOVZ. */
2723 {"gotoff_g1", 0,
2724 0, /* adr_type */
2725 0,
2726 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2727 0,
2728 0,
2729 0},
2730
2731 /* 15 bit offset into the page containing GOT entry for that symbol. */
2732 {"gotoff_lo15", 0,
2733 0, /* adr_type */
2734 0,
2735 0,
2736 0,
2737 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2738 0},
2739
2740 /* Get to the page containing GOT TLS entry for a symbol */
2741 {"gottprel_g0_nc", 0,
2742 0, /* adr_type */
2743 0,
2744 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2745 0,
2746 0,
2747 0},
2748
2749 /* Get to the page containing GOT TLS entry for a symbol */
2750 {"gottprel_g1", 0,
2751 0, /* adr_type */
2752 0,
2753 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2754 0,
2755 0,
2756 0},
2757
2758 /* Get to the page containing GOT TLS entry for a symbol */
2759 {"tlsgd", 0,
2760 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2761 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2762 0,
2763 0,
2764 0,
2765 0},
2766
2767 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2768 {"tlsgd_lo12", 0,
2769 0, /* adr_type */
2770 0,
2771 0,
2772 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2773 0,
2774 0},
2775
2776 /* Lower 16 bits address/value: MOVk. */
2777 {"tlsgd_g0_nc", 0,
2778 0, /* adr_type */
2779 0,
2780 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2781 0,
2782 0,
2783 0},
2784
2785 /* Most significant bits 16-31 of address/value: MOVZ. */
2786 {"tlsgd_g1", 0,
2787 0, /* adr_type */
2788 0,
2789 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2790 0,
2791 0,
2792 0},
2793
2794 /* Get to the page containing GOT TLS entry for a symbol */
2795 {"tlsdesc", 0,
2796 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2797 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2798 0,
2799 0,
2800 0,
2801 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2802
2803 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2804 {"tlsdesc_lo12", 0,
2805 0, /* adr_type */
2806 0,
2807 0,
2808 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2809 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2810 0},
2811
2812 /* Get to the page containing GOT TLS entry for a symbol.
2813 The same as GD, we allocate two consecutive GOT slots
2814 for module index and module offset, the only difference
2815 with GD is the module offset should be initialized to
2816 zero without any outstanding runtime relocation. */
2817 {"tlsldm", 0,
2818 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2819 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2820 0,
2821 0,
2822 0,
2823 0},
2824
2825 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2826 {"tlsldm_lo12_nc", 0,
2827 0, /* adr_type */
2828 0,
2829 0,
2830 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2831 0,
2832 0},
2833
2834 /* 12 bit offset into the module TLS base address. */
2835 {"dtprel_lo12", 0,
2836 0, /* adr_type */
2837 0,
2838 0,
2839 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2840 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2841 0},
2842
2843 /* Same as dtprel_lo12, no overflow check. */
2844 {"dtprel_lo12_nc", 0,
2845 0, /* adr_type */
2846 0,
2847 0,
2848 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2849 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2850 0},
2851
2852 /* bits[23:12] of offset to the module TLS base address. */
2853 {"dtprel_hi12", 0,
2854 0, /* adr_type */
2855 0,
2856 0,
2857 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2858 0,
2859 0},
2860
2861 /* bits[15:0] of offset to the module TLS base address. */
2862 {"dtprel_g0", 0,
2863 0, /* adr_type */
2864 0,
2865 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2866 0,
2867 0,
2868 0},
2869
2870 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2871 {"dtprel_g0_nc", 0,
2872 0, /* adr_type */
2873 0,
2874 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2875 0,
2876 0,
2877 0},
2878
2879 /* bits[31:16] of offset to the module TLS base address. */
2880 {"dtprel_g1", 0,
2881 0, /* adr_type */
2882 0,
2883 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2884 0,
2885 0,
2886 0},
2887
2888 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2889 {"dtprel_g1_nc", 0,
2890 0, /* adr_type */
2891 0,
2892 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2893 0,
2894 0,
2895 0},
2896
2897 /* bits[47:32] of offset to the module TLS base address. */
2898 {"dtprel_g2", 0,
2899 0, /* adr_type */
2900 0,
2901 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2902 0,
2903 0,
2904 0},
2905
2906 /* Lower 16 bit offset into GOT entry for a symbol */
2907 {"tlsdesc_off_g0_nc", 0,
2908 0, /* adr_type */
2909 0,
2910 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2911 0,
2912 0,
2913 0},
2914
2915 /* Higher 16 bit offset into GOT entry for a symbol */
2916 {"tlsdesc_off_g1", 0,
2917 0, /* adr_type */
2918 0,
2919 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2920 0,
2921 0,
2922 0},
2923
2924 /* Get to the page containing GOT TLS entry for a symbol */
2925 {"gottprel", 0,
2926 0, /* adr_type */
2927 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2928 0,
2929 0,
2930 0,
2931 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2932
2933 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2934 {"gottprel_lo12", 0,
2935 0, /* adr_type */
2936 0,
2937 0,
2938 0,
2939 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2940 0},
2941
2942 /* Get tp offset for a symbol. */
2943 {"tprel", 0,
2944 0, /* adr_type */
2945 0,
2946 0,
2947 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2948 0,
2949 0},
2950
2951 /* Get tp offset for a symbol. */
2952 {"tprel_lo12", 0,
2953 0, /* adr_type */
2954 0,
2955 0,
2956 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2957 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2958 0},
2959
2960 /* Get tp offset for a symbol. */
2961 {"tprel_hi12", 0,
2962 0, /* adr_type */
2963 0,
2964 0,
2965 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2966 0,
2967 0},
2968
2969 /* Get tp offset for a symbol. */
2970 {"tprel_lo12_nc", 0,
2971 0, /* adr_type */
2972 0,
2973 0,
2974 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2975 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2976 0},
2977
2978 /* Most significant bits 32-47 of address/value: MOVZ. */
2979 {"tprel_g2", 0,
2980 0, /* adr_type */
2981 0,
2982 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2983 0,
2984 0,
2985 0},
2986
2987 /* Most significant bits 16-31 of address/value: MOVZ. */
2988 {"tprel_g1", 0,
2989 0, /* adr_type */
2990 0,
2991 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2992 0,
2993 0,
2994 0},
2995
2996 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2997 {"tprel_g1_nc", 0,
2998 0, /* adr_type */
2999 0,
3000 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3001 0,
3002 0,
3003 0},
3004
3005 /* Most significant bits 0-15 of address/value: MOVZ. */
3006 {"tprel_g0", 0,
3007 0, /* adr_type */
3008 0,
3009 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3010 0,
3011 0,
3012 0},
3013
3014 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3015 {"tprel_g0_nc", 0,
3016 0, /* adr_type */
3017 0,
3018 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3019 0,
3020 0,
3021 0},
3022
3023 /* 15bit offset from got entry to base address of GOT table. */
3024 {"gotpage_lo15", 0,
3025 0,
3026 0,
3027 0,
3028 0,
3029 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3030 0},
3031
3032 /* 14bit offset from got entry to base address of GOT table. */
3033 {"gotpage_lo14", 0,
3034 0,
3035 0,
3036 0,
3037 0,
3038 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3039 0},
3040 };
3041
3042 /* Given the address of a pointer pointing to the textual name of a
3043 relocation as may appear in assembler source, attempt to find its
3044 details in reloc_table. The pointer will be updated to the character
3045 after the trailing colon. On failure, NULL will be returned;
3046 otherwise return the reloc_table_entry. */
3047
3048 static struct reloc_table_entry *
3049 find_reloc_table_entry (char **str)
3050 {
3051 unsigned int i;
3052 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3053 {
3054 int length = strlen (reloc_table[i].name);
3055
3056 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3057 && (*str)[length] == ':')
3058 {
3059 *str += (length + 1);
3060 return &reloc_table[i];
3061 }
3062 }
3063
3064 return NULL;
3065 }
3066
/* Classify the BFD relocation TYPE for fixup processing.
   Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK (the caller falls back to the generic heuristic).  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      return -1;
    }
}
3164
3165 int
3166 aarch64_force_relocation (struct fix *fixp)
3167 {
3168 int res = aarch64_force_reloc (fixp->fx_r_type);
3169
3170 if (res == -1)
3171 return generic_force_reloc (fixp);
3172 return res;
3173 }
3174
/* Mode argument to parse_shift and parser_shifter_operand.  Selects
   which shift/extend operators (and which operand forms) are legal in
   the current syntactic position.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3189
/* Parse a <shift> operator on an AArch64 data processing instruction.
   *STR points at the shift mnemonic; MODE selects which operators are
   legal here (see enum parse_shift_mode).  On success, fill in
   OPERAND->shifter (kind, amount, presence flags), advance *STR past
   the consumed text and return TRUE; otherwise record a syntax error
   and return FALSE.  */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan the alphabetic shift mnemonic.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Per-mode validation of the operator just recognized.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      (void) aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT,
				     NORMAL_RESOLUTION);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* A bare extend operator (e.g. "uxtw") implies amount 0; anything
	 else with a missing amount is an error.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3363
3364 /* Parse a <shifter_operand> for a data processing instruction:
3365
3366 #<immediate>
3367 #<immediate>, LSL #imm
3368
3369 Validation of immediate operands is deferred to md_apply_fix.
3370
3371 Return TRUE on success; otherwise return FALSE. */
3372
3373 static bool
3374 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3375 enum parse_shift_mode mode)
3376 {
3377 char *p;
3378
3379 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3380 return false;
3381
3382 p = *str;
3383
3384 /* Accept an immediate expression. */
3385 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
3386 REJECT_ABSENT, NORMAL_RESOLUTION))
3387 return false;
3388
3389 /* Accept optional LSL for arithmetic immediate values. */
3390 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3391 if (! parse_shift (&p, operand, SHIFTED_LSL))
3392 return false;
3393
3394 /* Not accept any shifter for logical immediate values. */
3395 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3396 && parse_shift (&p, operand, mode))
3397 {
3398 set_syntax_error (_("unexpected shift operator"));
3399 return false;
3400 }
3401
3402 *str = p;
3403 return true;
3404 }
3405
3406 /* Parse a <shifter_operand> for a data processing instruction:
3407
3408 <Rm>
3409 <Rm>, <shift>
3410 #<immediate>
3411 #<immediate>, LSL #imm
3412
3413 where <shift> is handled by parse_shift above, and the last two
3414 cases are handled by the function above.
3415
3416 Validation of immediate operands is deferred to md_apply_fix.
3417
3418 Return TRUE on success; otherwise return FALSE. */
3419
3420 static bool
3421 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3422 enum parse_shift_mode mode)
3423 {
3424 const reg_entry *reg;
3425 aarch64_opnd_qualifier_t qualifier;
3426 enum aarch64_operand_class opd_class
3427 = aarch64_get_operand_class (operand->type);
3428
3429 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3430 if (reg)
3431 {
3432 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3433 {
3434 set_syntax_error (_("unexpected register in the immediate operand"));
3435 return false;
3436 }
3437
3438 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3439 {
3440 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3441 return false;
3442 }
3443
3444 operand->reg.regno = reg->number;
3445 operand->qualifier = qualifier;
3446
3447 /* Accept optional shift operation on register. */
3448 if (! skip_past_comma (str))
3449 return true;
3450
3451 if (! parse_shift (str, operand, mode))
3452 return false;
3453
3454 return true;
3455 }
3456 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3457 {
3458 set_syntax_error
3459 (_("integer register expected in the extended/shifted operand "
3460 "register"));
3461 return false;
3462 }
3463
3464 /* We have a shifted immediate variable. */
3465 return parse_shifter_operand_imm (str, operand, mode);
3466 }
3467
3468 /* Return TRUE on success; return FALSE otherwise. */
3469
/* Parse a <shifter_operand> that may be prefixed by a relocation
   modifier written as "#:name:" or ":name:".  If no modifier is
   present, the whole operand is handed to parse_shifter_operand.

   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step over "#:" or ":" so *str points at the modifier name.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* add_type == 0 marks modifiers with no ADD-instruction variant.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT,
				    aarch64_force_reloc (entry->add_type) == 1))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3530
3531 /* Parse all forms of an address expression. Information is written
3532 to *OPERAND and/or inst.reloc.
3533
3534 The A64 instruction set has the following addressing modes:
3535
3536 Offset
3537 [base] // in SIMD ld/st structure
3538 [base{,#0}] // in ld/st exclusive
3539 [base{,#imm}]
3540 [base,Xm{,LSL #imm}]
3541 [base,Xm,SXTX {#imm}]
3542 [base,Wm,(S|U)XTW {#imm}]
3543 Pre-indexed
3544 [base]! // in ldraa/ldrab exclusive
3545 [base,#imm]!
3546 Post-indexed
3547 [base],#imm
3548 [base],Xm // in SIMD ld/st structure
3549 PC-relative (literal)
3550 label
3551 SVE:
3552 [base,#imm,MUL VL]
3553 [base,Zm.D{,LSL #imm}]
3554 [base,Zm.S,(S|U)XTW {#imm}]
3555 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3556 [Zn.S,#imm]
3557 [Zn.D,#imm]
3558 [Zn.S{, Xm}]
3559 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3560 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3561 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3562
3563 (As a convenience, the notation "=immediate" is permitted in conjunction
3564 with the pc-relative literal load instructions to automatically place an
3565 immediate value or symbolic address in a nearby literal pool and generate
3566 a hidden label which references it.)
3567
3568 Upon a successful parsing, the address structure in *OPERAND will be
3569 filled in the following way:
3570
3571 .base_regno = <base>
3572 .offset.is_reg // 1 if the offset is a register
3573 .offset.imm = <imm>
3574 .offset.regno = <Rm>
3575
3576 For different addressing modes defined in the A64 ISA:
3577
3578 Offset
3579 .pcrel=0; .preind=1; .postind=0; .writeback=0
3580 Pre-indexed
3581 .pcrel=0; .preind=1; .postind=0; .writeback=1
3582 Post-indexed
3583 .pcrel=0; .preind=0; .postind=1; .writeback=1
3584 PC-relative (literal)
3585 .pcrel=1; .preind=1; .postind=0; .writeback=0
3586
3587 The shift/extension information, if any, will be stored in .shifter.
3588 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3589 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3590 corresponding register.
3591
3592 BASE_TYPE says which types of base register should be accepted and
3593 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3594 is the type of shifter that is allowed for immediate offsets,
3595 or SHIFTED_NONE if none.
3596
3597 In all other respects, it is the caller's responsibility to check
3598 for addressing modes not supported by the instruction, and to set
3599 inst.reloc.type. */
3600
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': =immediate or label (a PC-relative form).  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* Pick the relocation variant matching the instruction:
	     ADR uses adr_type, all other PC-relative users here are
	     literal loads.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  /* A zero type means this modifier has no variant for this
	     instruction class.  */
	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					aarch64_force_reloc (entry->add_type) == 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				       NORMAL_RESOLUTION))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Parse the base register; BASE_TYPE restricts which register file
     is acceptable.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifters require a 64-bit (X) offset register.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* Base and offset must be the same element size, except
		 for the SVE2 vector-plus-scalar form [Zn.S, Xm].  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* Extending shifters (e.g. SXTW/UXTW) take a W register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					    aarch64_force_reloc (entry->add_type) == 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      /* Plain immediate offset, with an optional shifter when
		 IMM_SHIFT_MODE allows one (e.g. MUL VL for SVE).  */
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					    NORMAL_RESOLUTION))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      /* Writeback with a register offset is not a valid A64 form.  */
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					 NORMAL_RESOLUTION))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is shorthand for [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
3904
3905 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3906 on success. */
3907 static bool
3908 parse_address (char **str, aarch64_opnd_info *operand)
3909 {
3910 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3911 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3912 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3913 }
3914
3915 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3916 The arguments have the same meaning as for parse_address_main.
3917 Return TRUE on success. */
3918 static bool
3919 parse_sve_address (char **str, aarch64_opnd_info *operand,
3920 aarch64_opnd_qualifier_t *base_qualifier,
3921 aarch64_opnd_qualifier_t *offset_qualifier)
3922 {
3923 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3924 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3925 SHIFTED_MUL_VL);
3926 }
3927
3928 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3929 Return TRUE on success; otherwise return FALSE. */
3930 static bool
3931 parse_half (char **str, int *internal_fixup_p)
3932 {
3933 char *p = *str;
3934
3935 skip_past_char (&p, '#');
3936
3937 gas_assert (internal_fixup_p);
3938 *internal_fixup_p = 0;
3939
3940 if (*p == ':')
3941 {
3942 struct reloc_table_entry *entry;
3943
3944 /* Try to parse a relocation. Anything else is an error. */
3945 ++p;
3946
3947 if (!(entry = find_reloc_table_entry (&p)))
3948 {
3949 set_syntax_error (_("unknown relocation modifier"));
3950 return false;
3951 }
3952
3953 if (entry->movw_type == 0)
3954 {
3955 set_syntax_error
3956 (_("this relocation modifier is not allowed on this instruction"));
3957 return false;
3958 }
3959
3960 inst.reloc.type = entry->movw_type;
3961 }
3962 else
3963 *internal_fixup_p = 1;
3964
3965 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
3966 aarch64_force_reloc (inst.reloc.type) == 1))
3967 return false;
3968
3969 *str = p;
3970 return true;
3971 }
3972
3973 /* Parse an operand for an ADRP instruction:
3974 ADRP <Xd>, <label>
3975 Return TRUE on success; otherwise return FALSE. */
3976
3977 static bool
3978 parse_adrp (char **str)
3979 {
3980 char *p;
3981
3982 p = *str;
3983 if (*p == ':')
3984 {
3985 struct reloc_table_entry *entry;
3986
3987 /* Try to parse a relocation. Anything else is an error. */
3988 ++p;
3989 if (!(entry = find_reloc_table_entry (&p)))
3990 {
3991 set_syntax_error (_("unknown relocation modifier"));
3992 return false;
3993 }
3994
3995 if (entry->adrp_type == 0)
3996 {
3997 set_syntax_error
3998 (_("this relocation modifier is not allowed on this instruction"));
3999 return false;
4000 }
4001
4002 inst.reloc.type = entry->adrp_type;
4003 }
4004 else
4005 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4006
4007 inst.reloc.pc_rel = 1;
4008 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
4009 aarch64_force_reloc (inst.reloc.type) == 1))
4010 return false;
4011 *str = p;
4012 return true;
4013 }
4014
4015 /* Miscellaneous. */
4016
4017 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4018 of SIZE tokens in which index I gives the token for field value I,
4019 or is null if field value I is invalid. REG_TYPE says which register
4020 names should be treated as registers rather than as symbolic immediates.
4021
4022 Return true on success, moving *STR past the operand and storing the
4023 field value in *VAL. */
4024
4025 static int
4026 parse_enum_string (char **str, int64_t *val, const char *const *array,
4027 size_t size, aarch64_reg_type reg_type)
4028 {
4029 expressionS exp;
4030 char *p, *q;
4031 size_t i;
4032
4033 /* Match C-like tokens. */
4034 p = q = *str;
4035 while (ISALNUM (*q))
4036 q++;
4037
4038 for (i = 0; i < size; ++i)
4039 if (array[i]
4040 && strncasecmp (array[i], p, q - p) == 0
4041 && array[i][q - p] == 0)
4042 {
4043 *val = i;
4044 *str = q;
4045 return true;
4046 }
4047
4048 if (!parse_immediate_expression (&p, &exp, reg_type))
4049 return false;
4050
4051 if (exp.X_op == O_constant
4052 && (uint64_t) exp.X_add_number < size)
4053 {
4054 *val = exp.X_add_number;
4055 *str = p;
4056 return true;
4057 }
4058
4059 /* Use the default error for this operand. */
4060 return false;
4061 }
4062
4063 /* Parse an option for a preload instruction. Returns the encoding for the
4064 option, or PARSE_FAIL. */
4065
4066 static int
4067 parse_pldop (char **str)
4068 {
4069 char *p, *q;
4070 const struct aarch64_name_value_pair *o;
4071
4072 p = q = *str;
4073 while (ISALNUM (*q))
4074 q++;
4075
4076 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4077 if (!o)
4078 return PARSE_FAIL;
4079
4080 *str = q;
4081 return o->value;
4082 }
4083
4084 /* Parse an option for a barrier instruction. Returns the encoding for the
4085 option, or PARSE_FAIL. */
4086
4087 static int
4088 parse_barrier (char **str)
4089 {
4090 char *p, *q;
4091 const struct aarch64_name_value_pair *o;
4092
4093 p = q = *str;
4094 while (ISALPHA (*q))
4095 q++;
4096
4097 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4098 if (!o)
4099 return PARSE_FAIL;
4100
4101 *str = q;
4102 return o->value;
4103 }
4104
4105 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4106 return 0 if successful. Otherwise return PARSE_FAIL. */
4107
4108 static int
4109 parse_barrier_psb (char **str,
4110 const struct aarch64_name_value_pair ** hint_opt)
4111 {
4112 char *p, *q;
4113 const struct aarch64_name_value_pair *o;
4114
4115 p = q = *str;
4116 while (ISALPHA (*q))
4117 q++;
4118
4119 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4120 if (!o)
4121 {
4122 set_fatal_syntax_error
4123 ( _("unknown or missing option to PSB/TSB"));
4124 return PARSE_FAIL;
4125 }
4126
4127 if (o->value != 0x11)
4128 {
4129 /* PSB only accepts option name 'CSYNC'. */
4130 set_syntax_error
4131 (_("the specified option is not accepted for PSB/TSB"));
4132 return PARSE_FAIL;
4133 }
4134
4135 *str = q;
4136 *hint_opt = o;
4137 return 0;
4138 }
4139
4140 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4141 return 0 if successful. Otherwise return PARSE_FAIL. */
4142
4143 static int
4144 parse_bti_operand (char **str,
4145 const struct aarch64_name_value_pair ** hint_opt)
4146 {
4147 char *p, *q;
4148 const struct aarch64_name_value_pair *o;
4149
4150 p = q = *str;
4151 while (ISALPHA (*q))
4152 q++;
4153
4154 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4155 if (!o)
4156 {
4157 set_fatal_syntax_error
4158 ( _("unknown option to BTI"));
4159 return PARSE_FAIL;
4160 }
4161
4162 switch (o->value)
4163 {
4164 /* Valid BTI operands. */
4165 case HINT_OPD_C:
4166 case HINT_OPD_J:
4167 case HINT_OPD_JC:
4168 break;
4169
4170 default:
4171 set_syntax_error
4172 (_("unknown option to BTI"));
4173 return PARSE_FAIL;
4174 }
4175
4176 *str = q;
4177 *hint_opt = o;
4178 return 0;
4179 }
4180
4181 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4182 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4183 on failure. Format:
4184
4185 REG_TYPE.QUALIFIER
4186
4187 Side effect: Update STR with current parse position of success.
4188 */
4189
4190 static const reg_entry *
4191 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4192 aarch64_opnd_qualifier_t *qualifier)
4193 {
4194 char *q;
4195
4196 reg_entry *reg = parse_reg (str);
4197 if (reg != NULL && reg->type == reg_type)
4198 {
4199 if (!skip_past_char (str, '.'))
4200 {
4201 set_syntax_error (_("missing ZA tile element size separator"));
4202 return NULL;
4203 }
4204
4205 q = *str;
4206 switch (TOLOWER (*q))
4207 {
4208 case 'b':
4209 *qualifier = AARCH64_OPND_QLF_S_B;
4210 break;
4211 case 'h':
4212 *qualifier = AARCH64_OPND_QLF_S_H;
4213 break;
4214 case 's':
4215 *qualifier = AARCH64_OPND_QLF_S_S;
4216 break;
4217 case 'd':
4218 *qualifier = AARCH64_OPND_QLF_S_D;
4219 break;
4220 case 'q':
4221 *qualifier = AARCH64_OPND_QLF_S_Q;
4222 break;
4223 default:
4224 return NULL;
4225 }
4226 q++;
4227
4228 *str = q;
4229 return reg;
4230 }
4231
4232 return NULL;
4233 }
4234
4235 /* Parse SME ZA tile encoded in <ZAda> assembler symbol.
4236 Function return tile QUALIFIER on success.
4237
4238 Tiles are in example format: za[0-9]\.[bhsd]
4239
4240 Function returns <ZAda> register number or PARSE_FAIL.
4241 */
static int
parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
{
  int regno;
  const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZA, qualifier);

  if (reg == NULL)
    return PARSE_FAIL;
  regno = reg->number;

  /* The number of addressable tiles depends on the element size: the
     whole array is ZA0.B, or 2 halves ZA0-ZA1.H, 4 quarters ZA0-ZA3.S,
     or 8 eighths ZA0-ZA7.D.  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      if (regno != 0x00)
	{
	  set_syntax_error (_("invalid ZA tile register number, expected za0"));
	  return PARSE_FAIL;
	}
      break;
    case AARCH64_OPND_QLF_S_H:
      if (regno > 0x01)
	{
	  set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
	  return PARSE_FAIL;
	}
      break;
    case AARCH64_OPND_QLF_S_S:
      if (regno > 0x03)
	{
	  /* For the 32-bit variant: is the name of the ZA tile ZA0-ZA3.  */
	  set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
	  return PARSE_FAIL;
	}
      break;
    case AARCH64_OPND_QLF_S_D:
      if (regno > 0x07)
	{
	  /* For the 64-bit variant: is the name of the ZA tile ZA0-ZA7  */
	  set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
	  return PARSE_FAIL;
	}
      break;
    default:
      /* Note: the Q qualifier is rejected here; only MOVA-style tile
	 accesses accept .Q, and they use a different parser.  */
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
      return PARSE_FAIL;
    }

  return regno;
}
4291
4292 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4293
4294 #<imm>
4295 <imm>
4296
4297 Function return TRUE if immediate was found, or FALSE.
4298 */
4299 static bool
4300 parse_sme_immediate (char **str, int64_t *imm)
4301 {
4302 int64_t val;
4303 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4304 return false;
4305
4306 *imm = val;
4307 return true;
4308 }
4309
4310 /* Parse index with vector select register and immediate:
4311
4312 [<Wv>, <imm>]
4313 [<Wv>, #<imm>]
4314 where <Wv> is in W12-W15 range and # is optional for immediate.
4315
4316 Function performs extra check for mandatory immediate value if REQUIRE_IMM
4317 is set to true.
4318
4319 On success function returns TRUE and populated VECTOR_SELECT_REGISTER and
4320 IMM output.
4321 */
4322 static bool
4323 parse_sme_za_hv_tiles_operand_index (char **str,
4324 int *vector_select_register,
4325 int64_t *imm)
4326 {
4327 const reg_entry *reg;
4328
4329 if (!skip_past_char (str, '['))
4330 {
4331 set_syntax_error (_("expected '['"));
4332 return false;
4333 }
4334
4335 /* Vector select register W12-W15 encoded in the 2-bit Rv field. */
4336 reg = parse_reg (str);
4337 if (reg == NULL || reg->type != REG_TYPE_R_32
4338 || reg->number < 12 || reg->number > 15)
4339 {
4340 set_syntax_error (_("expected vector select register W12-W15"));
4341 return false;
4342 }
4343 *vector_select_register = reg->number;
4344
4345 if (!skip_past_char (str, ',')) /* Optional index offset immediate. */
4346 {
4347 set_syntax_error (_("expected ','"));
4348 return false;
4349 }
4350
4351 if (!parse_sme_immediate (str, imm))
4352 {
4353 set_syntax_error (_("index offset immediate expected"));
4354 return false;
4355 }
4356
4357 if (!skip_past_char (str, ']'))
4358 {
4359 set_syntax_error (_("expected ']'"));
4360 return false;
4361 }
4362
4363 return true;
4364 }
4365
4366 /* Parse SME ZA horizontal or vertical vector access to tiles.
4367 Function extracts from STR to SLICE_INDICATOR <HV> horizontal (0) or
4368 vertical (1) ZA tile vector orientation. VECTOR_SELECT_REGISTER
4369 contains <Wv> select register and corresponding optional IMMEDIATE.
4370 In addition QUALIFIER is extracted.
4371
4372 Field format examples:
4373
4374 ZA0<HV>.B[<Wv>, #<imm>]
4375 <ZAn><HV>.H[<Wv>, #<imm>]
4376 <ZAn><HV>.S[<Wv>, #<imm>]
4377 <ZAn><HV>.D[<Wv>, #<imm>]
4378 <ZAn><HV>.Q[<Wv>, #<imm>]
4379
4380 Function returns <ZAda> register number or PARSE_FAIL.
4381 */
static int
parse_sme_za_hv_tiles_operand (char **str,
			       enum sme_hv_slice *slice_indicator,
			       int *vector_select_register,
			       int *imm,
			       aarch64_opnd_qualifier_t *qualifier)
{
  char *qh, *qv;
  int regno;
  int regno_limit;
  int64_t imm_limit;
  int64_t imm_value;
  const reg_entry *reg;

  /* Try both orientations from the same starting point; only the copy
     of the cursor for the successful parse is written back to *STR.  */
  qh = qv = *str;
  if ((reg = parse_reg_with_qual (&qh, REG_TYPE_ZAH, qualifier)) != NULL)
    {
      *slice_indicator = HV_horizontal;
      *str = qh;
    }
  else if ((reg = parse_reg_with_qual (&qv, REG_TYPE_ZAV, qualifier)) != NULL)
    {
      *slice_indicator = HV_vertical;
      *str = qv;
    }
  else
    return PARSE_FAIL;
  regno = reg->number;

  /* The tile count and the slice-index range trade off against the
     element size: fewer, longer slices for smaller elements.  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      regno_limit = 0;
      imm_limit = 15;
      break;
    case AARCH64_OPND_QLF_S_H:
      regno_limit = 1;
      imm_limit = 7;
      break;
    case AARCH64_OPND_QLF_S_S:
      regno_limit = 3;
      imm_limit = 3;
      break;
    case AARCH64_OPND_QLF_S_D:
      regno_limit = 7;
      imm_limit = 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      regno_limit = 15;
      imm_limit = 0;
      break;
    default:
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
      return PARSE_FAIL;
    }

  /* Check if destination register ZA tile vector is in range for given
     instruction variant.  */
  if (regno < 0 || regno > regno_limit)
    {
      set_syntax_error (_("ZA tile vector out of range"));
      return PARSE_FAIL;
    }

  /* Parse the trailing "[<Wv>, #<imm>]" index.  */
  if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
					    &imm_value))
    return PARSE_FAIL;

  /* Check if optional index offset is in the range for instruction
     variant.  */
  if (imm_value < 0 || imm_value > imm_limit)
    {
      set_syntax_error (_("index offset out of range"));
      return PARSE_FAIL;
    }

  *imm = imm_value;

  return regno;
}
4462
4463
4464 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4465 Returns the encoding for the option, or PARSE_FAIL.
4466
4467 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4468 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4469
4470 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4471 field, otherwise as a system register.
4472 */
4473
static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy the register name, lower-cased, into BUF; stop at the first
     character that cannot appear in a system register name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Reject encodings whose fields do not fit: op0 is 2 bits,
	     op1 and op2 are 3 bits, Cn and Cm are 4 bits each.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the 16-bit sysreg encoding.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: warn about uses the selected processor does not
	 support, but still return the encoding.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4537
4538 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4539 for the option, or NULL. */
4540
static const aarch64_sys_ins_reg *
parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_ins_reg *o;

  /* Copy the operand name, lower-cased, into BUF; stop at the first
     character that cannot appear in a system register name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return NULL;

  o = str_hash_find (sys_ins_regs, buf);
  if (!o)
    return NULL;

  /* Known name: diagnose unsupported or deprecated uses, but still
     return the table entry.  */
  if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
					o->name, o->value, o->flags, 0))
    as_bad (_("selected processor does not support system register "
	      "name '%s'"), buf);
  if (aarch64_sys_reg_deprecated_p (o->flags))
    as_warn (_("system register name '%s' is deprecated and may be "
	       "removed in a future release"), buf);

  *str = q;
  return o;
}
4575 \f
/* Operand-parsing helper macros.  Each parses one token from the local
   variable `str' and, on failure, jumps to the enclosing function's
   `failure' label (leaving any diagnostic in the parser error state).  */

/* Consume the single character CHR, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of REGTYPE into `val', or fail with the default
   error.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse an integer register of REG_TYPE into `info', or fail with the
   default error.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into `val' with no range check ("nc"),
   or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into `val' and require MIN <= val <= MAX,
   or fail.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic enum token from ARRAY into `val', or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; fail if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4627 \f
/* Encode the 12-bit immediate of an add/sub (immediate) instruction;
   the field occupies bits [21:10] of the instruction word.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  const unsigned imm_lsb = 10;
  return imm << imm_lsb;
}
4634
/* Encode the shift-amount field of an add/sub (immediate) instruction;
   the field occupies bits [23:22] of the instruction word.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  const unsigned shift_lsb = 22;
  return cnt << shift_lsb;
}
4641
4642
/* Encode the 21-bit immediate of an ADR instruction: bits [1:0] of the
   immediate go to immlo (bits [30:29]) and bits [20:2] go to immhi
   (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;
  uint32_t immhi = (imm >> 2) & 0x7ffff;
  return (immlo << 29) | (immhi << 5);
}
4650
/* Encode the 16-bit immediate of a move-wide instruction; the imm16
   field occupies bits [20:5].  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  const unsigned imm16_lsb = 5;
  return imm << imm16_lsb;
}
4657
/* Encode the 26-bit offset of an unconditional branch; the offset is
   placed in bits [25:0] of the instruction word.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t ofs_mask = (1u << 26) - 1;
  return ofs & ofs_mask;
}
4664
/* Encode the 19-bit offset of a conditional branch or compare & branch;
   the offset occupies bits [23:5] of the instruction word.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t ofs_mask = (1u << 19) - 1;
  return (ofs & ofs_mask) << 5;
}
4671
/* Encode the 19-bit offset of a load-literal instruction; the offset
   occupies bits [23:5] of the instruction word.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t ofs_mask = (1u << 19) - 1;
  return (ofs & ofs_mask) << 5;
}
4678
/* Encode the 14-bit offset of a test & branch instruction; the offset
   occupies bits [18:5] of the instruction word.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t ofs_mask = (1u << 14) - 1;
  return (ofs & ofs_mask) << 5;
}
4685
/* Encode the 16-bit immediate of SVC/HVC/SMC; the imm16 field occupies
   bits [20:5] of the instruction word.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  const unsigned imm16_lsb = 5;
  return imm << imm16_lsb;
}
4692
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling the `op'
   bit (bit 30) that distinguishes the two families.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
4699
/* Force a MOVZ/MOVN-family opcode to its MOVZ form by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode | movz_bit;
}
4705
/* Force a MOVZ/MOVN-family opcode to its MOVN form by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode & ~movz_bit;
}
4711
4712 /* Overall per-instruction processing. */
4713
4714 /* We need to be able to fix up arbitrary expressions in some statements.
4715 This is so that we can handle symbols that are an arbitrary distance from
4716 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4717 which returns part of an address in a form which will be valid for
4718 a data instruction. We do this by pushing the expression into a symbol
4719 in the expr_section, and creating a fix for that. */
4720
4721 static fixS *
4722 fix_new_aarch64 (fragS * frag,
4723 int where,
4724 short int size,
4725 expressionS * exp,
4726 int pc_rel,
4727 int reloc)
4728 {
4729 fixS *new_fix;
4730
4731 switch (exp->X_op)
4732 {
4733 case O_constant:
4734 case O_symbol:
4735 case O_add:
4736 case O_subtract:
4737 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4738 break;
4739
4740 default:
4741 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4742 pc_rel, reloc);
4743 break;
4744 }
4745 return new_fix;
4746 }
4747 \f
/* Diagnostics on operands errors.  */

/* Non-zero to emit verbose diagnostics for operand mismatches (e.g. the
   "did you mean this?" hints); cleared by -mno-verbose-error.  */
static int verbose_error_p = 1;
4753
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.

   The entries must track enum aarch64_operand_error_kind exactly, since
   the array is indexed by an error kind (see the DEBUG_TRACE calls in
   add_operand_error_record and output_operand_error_report).  The entry
   for AARCH64_OPDE_UNTIED_OPERAND was previously missing, which shifted
   every name after AARCH64_OPDE_INVALID_VARIANT by one.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4769
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enum values being declared in
     ascending order of severity; assert the full chain here so that a
     reordering of the enum cannot silently invalidate `lhs > rhs'.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4790
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.

   N.B. returns a pointer to a static buffer, valid only until the next
   call.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4819
/* Clear *INSTRUCTION to all zeroes, then mark its relocation slot as
   unused so that output_inst only emits a fix-up once a parser has
   filled one in.  */
static void
reset_aarch64_instruction (aarch64_instruction *instruction)
{
  memset (instruction, '\0', sizeof (aarch64_instruction));
  instruction->reloc.type = BFD_RELOC_UNUSED;
}
4826
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error, tied to the opcode template it was found
   against.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Template the error belongs to.  */
  aarch64_operand_error detail;		/* Kind, operand index, message.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of error records; head and tail are both kept so
   that insertion and whole-list recycling are O(1).  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
4846
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines so that a record
   need not be allocated for every template tried.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4858
4859 /* Initialize the data structure that stores the operand mismatch
4860 information on assembling one line of the assembly code. */
4861 static void
4862 init_operand_error_report (void)
4863 {
4864 if (operand_error_report.head != NULL)
4865 {
4866 gas_assert (operand_error_report.tail != NULL);
4867 operand_error_report.tail->next = free_opnd_error_record_nodes;
4868 free_opnd_error_record_nodes = operand_error_report.head;
4869 operand_error_report.head = NULL;
4870 operand_error_report.tail = NULL;
4871 return;
4872 }
4873 gas_assert (operand_error_report.tail == NULL);
4874 }
4875
4876 /* Return TRUE if some operand error has been recorded during the
4877 parsing of the current assembly line using the opcode *OPCODE;
4878 otherwise return FALSE. */
4879 static inline bool
4880 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4881 {
4882 operand_error_record *record = operand_error_report.head;
4883 return record && record->opcode == opcode;
4884 }
4885
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  /* Otherwise RECORD (the list head) already belongs to OPCODE; keep
     the existing error if it is on an earlier operand and of higher
     severity than the new one.  */
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  record->detail = new_record->detail;
}
4937
4938 static inline void
4939 record_operand_error_info (const aarch64_opcode *opcode,
4940 aarch64_operand_error *error_info)
4941 {
4942 operand_error_record record;
4943 record.opcode = opcode;
4944 record.detail = *error_info;
4945 add_operand_error_record (&record);
4946 }
4947
4948 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4949 error message *ERROR, for operand IDX (count from 0). */
4950
4951 static void
4952 record_operand_error (const aarch64_opcode *opcode, int idx,
4953 enum aarch64_operand_error_kind kind,
4954 const char* error)
4955 {
4956 aarch64_operand_error info;
4957 memset(&info, 0, sizeof (info));
4958 info.index = idx;
4959 info.kind = kind;
4960 info.error = error;
4961 info.non_fatal = false;
4962 record_operand_error_info (opcode, &info);
4963 }
4964
4965 static void
4966 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4967 enum aarch64_operand_error_kind kind,
4968 const char* error, const int *extra_data)
4969 {
4970 aarch64_operand_error info;
4971 info.index = idx;
4972 info.kind = kind;
4973 info.error = error;
4974 info.data[0] = extra_data[0];
4975 info.data[1] = extra_data[1];
4976 info.data[2] = extra_data[2];
4977 info.non_fatal = false;
4978 record_operand_error_info (opcode, &info);
4979 }
4980
4981 static void
4982 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4983 const char* error, int lower_bound,
4984 int upper_bound)
4985 {
4986 int data[3] = {lower_bound, upper_bound, 0};
4987 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4988 error, data);
4989 }
4990
4991 /* Remove the operand error record for *OPCODE. */
4992 static void ATTRIBUTE_UNUSED
4993 remove_operand_error_record (const aarch64_opcode *opcode)
4994 {
4995 if (opcode_has_operand_error_p (opcode))
4996 {
4997 operand_error_record* record = operand_error_report.head;
4998 gas_assert (record != NULL && operand_error_report.tail != NULL);
4999 operand_error_report.head = record->next;
5000 record->next = free_opnd_error_record_nodes;
5001 free_opnd_error_record_nodes = record;
5002 if (operand_error_report.head == NULL)
5003 {
5004 gas_assert (operand_error_report.tail == record);
5005 operand_error_report.tail = NULL;
5006 }
5007 }
5008 }
5009
5010 /* Given the instruction in *INSTR, return the index of the best matched
5011 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5012
5013 Return -1 if there is no qualifier sequence; return the first match
5014 if there is multiple matches found. */
5015
5016 static int
5017 find_best_match (const aarch64_inst *instr,
5018 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
5019 {
5020 int i, num_opnds, max_num_matched, idx;
5021
5022 num_opnds = aarch64_num_of_operands (instr->opcode);
5023 if (num_opnds == 0)
5024 {
5025 DEBUG_TRACE ("no operand");
5026 return -1;
5027 }
5028
5029 max_num_matched = 0;
5030 idx = 0;
5031
5032 /* For each pattern. */
5033 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5034 {
5035 int j, num_matched;
5036 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
5037
5038 /* Most opcodes has much fewer patterns in the list. */
5039 if (empty_qualifier_sequence_p (qualifiers))
5040 {
5041 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
5042 break;
5043 }
5044
5045 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
5046 if (*qualifiers == instr->operands[j].qualifier)
5047 ++num_matched;
5048
5049 if (num_matched > max_num_matched)
5050 {
5051 max_num_matched = num_matched;
5052 idx = i;
5053 }
5054 }
5055
5056 DEBUG_TRACE ("return with %d", idx);
5057 return idx;
5058 }
5059
5060 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5061 corresponding operands in *INSTR. */
5062
5063 static inline void
5064 assign_qualifier_sequence (aarch64_inst *instr,
5065 const aarch64_opnd_qualifier_t *qualifiers)
5066 {
5067 int i = 0;
5068 int num_opnds = aarch64_num_of_operands (instr->opcode);
5069 gas_assert (num_opnds);
5070 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5071 instr->operands[i].qualifier = *qualifiers;
5072 }
5073
/* Print operands for the diagnosis purpose.

   Appends the textual form of each operand in OPNDS to BUF.  N.B. BUF is
   appended to with unbounded strcat; callers must provide a buffer large
   enough for all operands (existing callers pass 2048 bytes).  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cpu_variant);

      /* Delimiter: a space before the first operand, ", " between the
	 rest.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);
    }
}
5107
/* Send to stderr a printf-style informational message, prefixed with
   the current file name and (when known) line number.  */

static void
output_info (const char *format, ...)
{
  const char *fname;
  unsigned int lineno;
  va_list ap;

  fname = as_where (&lineno);
  if (fname)
    {
      if (lineno != 0)
	fprintf (stderr, "%s:%u: ", fname, lineno);
      else
	fprintf (stderr, "%s: ", fname);
    }
  fprintf (stderr, _("Info: "));
  va_start (ap, format);
  vfprintf (stderr, format, ap);
  va_end (ap);
  (void) fputc ('\n', stderr);
}
5131
/* Output one operand error record.

   STR is the offending assembly line, echoed in every message.  Errors
   flagged non-fatal are demoted to warnings.  For invalid-variant errors
   the function may re-parse and re-encode STR into the global `inst' to
   print "did you mean" suggestions, so it has side effects on `inst'.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  /* An index of -1 means the operand is unknown.  */
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5308
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5406 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *out = (unsigned char *) buf;
  int i;

  /* Emit the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    out[i] = (insn >> (8 * i)) & 0xff;
}
5417
/* Read a little-endian AARCH64 instruction word back from BUF.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  const unsigned char *in = (const unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Fold the bytes in most-significant first so the loop ends with the
     little-endian value.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | in[i];
  return insn;
}
5427
/* Emit the 4-byte instruction held in the global `inst' to the current
   frag, create a fix-up for any relocation recorded in inst.reloc, and
   advance the DWARF line-number state.  NEW_INST, if non-NULL, is
   attached to the fix-up so it can be re-encoded at fix-up time.  */
static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  to = frag_more (INSN_SIZE);

  /* Mark this frag as containing instructions.  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fix-ups carry the operand and flags so md_apply_fix
	     can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
5461
/* Link together opcodes of the same name.  */

/* One node in the singly-linked list of all opcode templates that share
   a mnemonic; the lists are the values stored in aarch64_ops_hsh.  */
struct templates
{
  const aarch64_opcode *opcode;
  struct templates *next;
};

typedef struct templates templates;
5471
5472 static templates *
5473 lookup_mnemonic (const char *start, int len)
5474 {
5475 templates *templ = NULL;
5476
5477 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5478 return templ;
5479 }
5480
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.

   On success *STR is advanced past the mnemonic (including any
   condition suffix) and inst.cond is set as a side effect; on failure
   returns 0/NULL with *STR left at the point of the problem.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  Remember the first '.' as the potential
     condition-suffix separator.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Reject an empty mnemonic or one beginning with '.'.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown suffix: leave *STR at the '.' so the caller can
	     diagnose the bad condition name.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* 13 bytes + ".c" fits in the 16-byte condname buffer; no NUL is
	 needed because the lookup is length-bounded.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5544
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.

   Returns AARCH64_OPND_QLF_NIL (after reporting an error via
   first_error) when the arrangement is invalid.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the final qualifier is at a
     width-dependent offset from this base (computed below).  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
  };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predicate suffixes map directly to their qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register: the S_x qualifiers are declared in the
	 same order as vector_el_type.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit total register sizes are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5619
5620 /* Process an optional operand that is found omitted from the assembly line.
5621 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5622 instruction's opcode entry while IDX is the index of this omitted operand.
5623 */
5624
static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* Each opcode entry provides a single default value shared by all of
     its optional operands; how that value is interpreted depends on the
     operand type dispatched on below.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  /* Only operands marked optional in the opcode table may be omitted,
     and this one must not have been filled in by the parser already.  */
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
      /* General, FP and SIMD register operands: the default value is
	 the register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

      /* Vector element (register lane) operands: the default value is
	 the register number.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

      /* Immediate operands: the default value is the immediate itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      /* An omitted scaled pattern takes the default immediate together
	 with an implicit "MUL #1" multiplier.  */
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      /* No immediate expression was parsed, so make sure no relocation
	 is left pending for this instruction.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* The default value indexes the barrier option table.  */
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      /* The default value indexes the hint option table.  */
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      /* Operand types with no meaningful omitted default.  */
      break;
    }
}
5718
5719 /* Process the relocation type for move wide instructions.
5720 Return TRUE on success; otherwise return FALSE. */
5721
static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* A W-register destination restricts the value to 32 bits, which
     rules out the G2/G3 relocations below.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK only inserts one 16-bit chunk into an otherwise unchanged
     register, so relocation types whose semantics require the whole
     register to be set (signed _S forms, PC-relative PREL forms and the
     TLS GD/LE forms listed here) are rejected for it.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Derive the implicit LSL amount from which 16-bit group (G0..G3)
     of the value the relocation selects.  */
  switch (inst.reloc.type)
    {
      /* Group 0: bits [15:0], no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
      /* Group 1: bits [31:16].  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
      /* Group 2: bits [47:32]; meaningless for a 32-bit register.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
      /* Group 3: bits [63:48]; meaningless for a 32-bit register.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
5820
5821 /* A primitive log calculator. */
5822
/* A primitive log2 calculator for operand access sizes.  SIZE is the
   size in bytes and must be one of 1, 2, 4, 8 or 16; the corresponding
   base-2 logarithm is returned.  Any other value trips an assertion
   and yields (unsigned int) -1.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* Maps SIZE - 1 to log2 (SIZE); (unsigned char) -1 marks entries for
     sizes that are not powers of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject 0 as well as over-large sizes: size == 0 would otherwise
     read ls[-1], which is out of bounds (undefined behaviour).  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5836
5837 /* Determine and return the real reloc type code for an instruction
5838 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
5839
static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Real relocation codes indexed by [pseudo reloc family][log2 of the
     access size in bytes].  The TLS rows have no 128-bit variant, hence
     the BFD_RELOC_AARCH64_NONE entries.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  /* Only the LDST_LO12 family of pseudo relocation types may reach
     here, and only on an instruction whose second operand is an
     unsigned 12-bit scaled address offset.  */
  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand carried no qualifier of its own, deduce the
     expected one from the qualifier of operand 0.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  /* The log2 of the access size selects the table column.  */
  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS DTPREL/TPREL rows top out at 64-bit accesses (see the
     BFD_RELOC_AARCH64_NONE entries above).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5924
5925 /* Check whether a register list REGINFO is valid. The registers must be
5926 numbered in increasing order (modulo 32), in increments of one or two.
5927
5928 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5929 increments of two.
5930
5931 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5932
/* Check whether the register list encoded in REGINFO is valid.

   The encoding packs, from the least significant bits upwards, a 2-bit
   count (number of registers minus one) followed by up to four 5-bit
   register numbers.  Successive registers must ascend (modulo 32) by
   one, or by two when ACCEPT_ALTERNATE is non-zero.

   Return FALSE if such a register list is invalid, otherwise return
   TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t count = (reginfo & 0x3) + 1;
  uint32_t step = accept_alternate ? 2 : 1;
  uint32_t expected, n;

  reginfo >>= 2;
  expected = reginfo & 0x1f;

  for (n = 1; n < count; n++)
    {
      reginfo >>= 5;
      expected = (expected + step) & 0x1f;
      if ((reginfo & 0x1f) != expected)
	return false;
    }

  return true;
}
5955
5956 /* Generic instruction operand parser. This does no encoding and no
5957 semantic validation; it merely squirrels values away in the inst
5958 structure. Returns TRUE or FALSE depending on whether the
5959 specified grammar matched. */
5960
5961 static bool
5962 parse_operands (char *str, const aarch64_opcode *opcode)
5963 {
5964 int i;
5965 char *backtrack_pos = 0;
5966 const enum aarch64_opnd *operands = opcode->operands;
5967 aarch64_reg_type imm_reg_type;
5968
5969 clear_error ();
5970 skip_whitespace (str);
5971
5972 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5973 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5974 else
5975 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5976
5977 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5978 {
5979 int64_t val;
5980 const reg_entry *reg;
5981 int comma_skipped_p = 0;
5982 aarch64_reg_type rtype;
5983 struct vector_type_el vectype;
5984 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5985 aarch64_opnd_info *info = &inst.base.operands[i];
5986 aarch64_reg_type reg_type;
5987
5988 DEBUG_TRACE ("parse operand %d", i);
5989
5990 /* Assign the operand code. */
5991 info->type = operands[i];
5992
5993 if (optional_operand_p (opcode, i))
5994 {
5995 /* Remember where we are in case we need to backtrack. */
5996 gas_assert (!backtrack_pos);
5997 backtrack_pos = str;
5998 }
5999
6000 /* Expect comma between operands; the backtrack mechanism will take
6001 care of cases of omitted optional operand. */
6002 if (i > 0 && ! skip_past_char (&str, ','))
6003 {
6004 set_syntax_error (_("comma expected between operands"));
6005 goto failure;
6006 }
6007 else
6008 comma_skipped_p = 1;
6009
6010 switch (operands[i])
6011 {
6012 case AARCH64_OPND_Rd:
6013 case AARCH64_OPND_Rn:
6014 case AARCH64_OPND_Rm:
6015 case AARCH64_OPND_Rt:
6016 case AARCH64_OPND_Rt2:
6017 case AARCH64_OPND_Rs:
6018 case AARCH64_OPND_Ra:
6019 case AARCH64_OPND_Rt_LS64:
6020 case AARCH64_OPND_Rt_SYS:
6021 case AARCH64_OPND_PAIRREG:
6022 case AARCH64_OPND_SVE_Rm:
6023 po_int_reg_or_fail (REG_TYPE_R_Z);
6024
6025 /* In LS64 load/store instructions Rt register number must be even
6026 and <=22. */
6027 if (operands[i] == AARCH64_OPND_Rt_LS64)
6028 {
6029 /* We've already checked if this is valid register.
6030 This will check if register number (Rt) is not undefined for LS64
6031 instructions:
6032 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6033 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6034 {
6035 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6036 goto failure;
6037 }
6038 }
6039 break;
6040
6041 case AARCH64_OPND_Rd_SP:
6042 case AARCH64_OPND_Rn_SP:
6043 case AARCH64_OPND_Rt_SP:
6044 case AARCH64_OPND_SVE_Rn_SP:
6045 case AARCH64_OPND_Rm_SP:
6046 po_int_reg_or_fail (REG_TYPE_R_SP);
6047 break;
6048
6049 case AARCH64_OPND_Rm_EXT:
6050 case AARCH64_OPND_Rm_SFT:
6051 po_misc_or_fail (parse_shifter_operand
6052 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6053 ? SHIFTED_ARITH_IMM
6054 : SHIFTED_LOGIC_IMM)));
6055 if (!info->shifter.operator_present)
6056 {
6057 /* Default to LSL if not present. Libopcodes prefers shifter
6058 kind to be explicit. */
6059 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6060 info->shifter.kind = AARCH64_MOD_LSL;
6061 /* For Rm_EXT, libopcodes will carry out further check on whether
6062 or not stack pointer is used in the instruction (Recall that
6063 "the extend operator is not optional unless at least one of
6064 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6065 }
6066 break;
6067
6068 case AARCH64_OPND_Fd:
6069 case AARCH64_OPND_Fn:
6070 case AARCH64_OPND_Fm:
6071 case AARCH64_OPND_Fa:
6072 case AARCH64_OPND_Ft:
6073 case AARCH64_OPND_Ft2:
6074 case AARCH64_OPND_Sd:
6075 case AARCH64_OPND_Sn:
6076 case AARCH64_OPND_Sm:
6077 case AARCH64_OPND_SVE_VZn:
6078 case AARCH64_OPND_SVE_Vd:
6079 case AARCH64_OPND_SVE_Vm:
6080 case AARCH64_OPND_SVE_Vn:
6081 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
6082 if (val == PARSE_FAIL)
6083 {
6084 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6085 goto failure;
6086 }
6087 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
6088
6089 info->reg.regno = val;
6090 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
6091 break;
6092
6093 case AARCH64_OPND_SVE_Pd:
6094 case AARCH64_OPND_SVE_Pg3:
6095 case AARCH64_OPND_SVE_Pg4_5:
6096 case AARCH64_OPND_SVE_Pg4_10:
6097 case AARCH64_OPND_SVE_Pg4_16:
6098 case AARCH64_OPND_SVE_Pm:
6099 case AARCH64_OPND_SVE_Pn:
6100 case AARCH64_OPND_SVE_Pt:
6101 case AARCH64_OPND_SME_Pm:
6102 reg_type = REG_TYPE_PN;
6103 goto vector_reg;
6104
6105 case AARCH64_OPND_SVE_Za_5:
6106 case AARCH64_OPND_SVE_Za_16:
6107 case AARCH64_OPND_SVE_Zd:
6108 case AARCH64_OPND_SVE_Zm_5:
6109 case AARCH64_OPND_SVE_Zm_16:
6110 case AARCH64_OPND_SVE_Zn:
6111 case AARCH64_OPND_SVE_Zt:
6112 reg_type = REG_TYPE_ZN;
6113 goto vector_reg;
6114
6115 case AARCH64_OPND_Va:
6116 case AARCH64_OPND_Vd:
6117 case AARCH64_OPND_Vn:
6118 case AARCH64_OPND_Vm:
6119 reg_type = REG_TYPE_VN;
6120 vector_reg:
6121 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6122 if (val == PARSE_FAIL)
6123 {
6124 first_error (_(get_reg_expected_msg (reg_type)));
6125 goto failure;
6126 }
6127 if (vectype.defined & NTA_HASINDEX)
6128 goto failure;
6129
6130 info->reg.regno = val;
6131 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6132 && vectype.type == NT_invtype)
6133 /* Unqualified Pn and Zn registers are allowed in certain
6134 contexts. Rely on F_STRICT qualifier checking to catch
6135 invalid uses. */
6136 info->qualifier = AARCH64_OPND_QLF_NIL;
6137 else
6138 {
6139 info->qualifier = vectype_to_qualifier (&vectype);
6140 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6141 goto failure;
6142 }
6143 break;
6144
6145 case AARCH64_OPND_VdD1:
6146 case AARCH64_OPND_VnD1:
6147 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
6148 if (val == PARSE_FAIL)
6149 {
6150 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6151 goto failure;
6152 }
6153 if (vectype.type != NT_d || vectype.index != 1)
6154 {
6155 set_fatal_syntax_error
6156 (_("the top half of a 128-bit FP/SIMD register is expected"));
6157 goto failure;
6158 }
6159 info->reg.regno = val;
6160 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6161 here; it is correct for the purpose of encoding/decoding since
6162 only the register number is explicitly encoded in the related
6163 instructions, although this appears a bit hacky. */
6164 info->qualifier = AARCH64_OPND_QLF_S_D;
6165 break;
6166
6167 case AARCH64_OPND_SVE_Zm3_INDEX:
6168 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6169 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6170 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6171 case AARCH64_OPND_SVE_Zm4_INDEX:
6172 case AARCH64_OPND_SVE_Zn_INDEX:
6173 reg_type = REG_TYPE_ZN;
6174 goto vector_reg_index;
6175
6176 case AARCH64_OPND_Ed:
6177 case AARCH64_OPND_En:
6178 case AARCH64_OPND_Em:
6179 case AARCH64_OPND_Em16:
6180 case AARCH64_OPND_SM3_IMM2:
6181 reg_type = REG_TYPE_VN;
6182 vector_reg_index:
6183 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6184 if (val == PARSE_FAIL)
6185 {
6186 first_error (_(get_reg_expected_msg (reg_type)));
6187 goto failure;
6188 }
6189 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6190 goto failure;
6191
6192 info->reglane.regno = val;
6193 info->reglane.index = vectype.index;
6194 info->qualifier = vectype_to_qualifier (&vectype);
6195 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6196 goto failure;
6197 break;
6198
6199 case AARCH64_OPND_SVE_ZnxN:
6200 case AARCH64_OPND_SVE_ZtxN:
6201 reg_type = REG_TYPE_ZN;
6202 goto vector_reg_list;
6203
6204 case AARCH64_OPND_LVn:
6205 case AARCH64_OPND_LVt:
6206 case AARCH64_OPND_LVt_AL:
6207 case AARCH64_OPND_LEt:
6208 reg_type = REG_TYPE_VN;
6209 vector_reg_list:
6210 if (reg_type == REG_TYPE_ZN
6211 && get_opcode_dependent_value (opcode) == 1
6212 && *str != '{')
6213 {
6214 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6215 if (val == PARSE_FAIL)
6216 {
6217 first_error (_(get_reg_expected_msg (reg_type)));
6218 goto failure;
6219 }
6220 info->reglist.first_regno = val;
6221 info->reglist.num_regs = 1;
6222 }
6223 else
6224 {
6225 val = parse_vector_reg_list (&str, reg_type, &vectype);
6226 if (val == PARSE_FAIL)
6227 goto failure;
6228
6229 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6230 {
6231 set_fatal_syntax_error (_("invalid register list"));
6232 goto failure;
6233 }
6234
6235 if (vectype.width != 0 && *str != ',')
6236 {
6237 set_fatal_syntax_error
6238 (_("expected element type rather than vector type"));
6239 goto failure;
6240 }
6241
6242 info->reglist.first_regno = (val >> 2) & 0x1f;
6243 info->reglist.num_regs = (val & 0x3) + 1;
6244 }
6245 if (operands[i] == AARCH64_OPND_LEt)
6246 {
6247 if (!(vectype.defined & NTA_HASINDEX))
6248 goto failure;
6249 info->reglist.has_index = 1;
6250 info->reglist.index = vectype.index;
6251 }
6252 else
6253 {
6254 if (vectype.defined & NTA_HASINDEX)
6255 goto failure;
6256 if (!(vectype.defined & NTA_HASTYPE))
6257 {
6258 if (reg_type == REG_TYPE_ZN)
6259 set_fatal_syntax_error (_("missing type suffix"));
6260 goto failure;
6261 }
6262 }
6263 info->qualifier = vectype_to_qualifier (&vectype);
6264 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6265 goto failure;
6266 break;
6267
6268 case AARCH64_OPND_CRn:
6269 case AARCH64_OPND_CRm:
6270 {
6271 char prefix = *(str++);
6272 if (prefix != 'c' && prefix != 'C')
6273 goto failure;
6274
6275 po_imm_nc_or_fail ();
6276 if (val > 15)
6277 {
6278 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6279 goto failure;
6280 }
6281 info->qualifier = AARCH64_OPND_QLF_CR;
6282 info->imm.value = val;
6283 break;
6284 }
6285
6286 case AARCH64_OPND_SHLL_IMM:
6287 case AARCH64_OPND_IMM_VLSR:
6288 po_imm_or_fail (1, 64);
6289 info->imm.value = val;
6290 break;
6291
6292 case AARCH64_OPND_CCMP_IMM:
6293 case AARCH64_OPND_SIMM5:
6294 case AARCH64_OPND_FBITS:
6295 case AARCH64_OPND_TME_UIMM16:
6296 case AARCH64_OPND_UIMM4:
6297 case AARCH64_OPND_UIMM4_ADDG:
6298 case AARCH64_OPND_UIMM10:
6299 case AARCH64_OPND_UIMM3_OP1:
6300 case AARCH64_OPND_UIMM3_OP2:
6301 case AARCH64_OPND_IMM_VLSL:
6302 case AARCH64_OPND_IMM:
6303 case AARCH64_OPND_IMM_2:
6304 case AARCH64_OPND_WIDTH:
6305 case AARCH64_OPND_SVE_INV_LIMM:
6306 case AARCH64_OPND_SVE_LIMM:
6307 case AARCH64_OPND_SVE_LIMM_MOV:
6308 case AARCH64_OPND_SVE_SHLIMM_PRED:
6309 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6310 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6311 case AARCH64_OPND_SVE_SHRIMM_PRED:
6312 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6313 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6314 case AARCH64_OPND_SVE_SIMM5:
6315 case AARCH64_OPND_SVE_SIMM5B:
6316 case AARCH64_OPND_SVE_SIMM6:
6317 case AARCH64_OPND_SVE_SIMM8:
6318 case AARCH64_OPND_SVE_UIMM3:
6319 case AARCH64_OPND_SVE_UIMM7:
6320 case AARCH64_OPND_SVE_UIMM8:
6321 case AARCH64_OPND_SVE_UIMM8_53:
6322 case AARCH64_OPND_IMM_ROT1:
6323 case AARCH64_OPND_IMM_ROT2:
6324 case AARCH64_OPND_IMM_ROT3:
6325 case AARCH64_OPND_SVE_IMM_ROT1:
6326 case AARCH64_OPND_SVE_IMM_ROT2:
6327 case AARCH64_OPND_SVE_IMM_ROT3:
6328 po_imm_nc_or_fail ();
6329 info->imm.value = val;
6330 break;
6331
6332 case AARCH64_OPND_SVE_AIMM:
6333 case AARCH64_OPND_SVE_ASIMM:
6334 po_imm_nc_or_fail ();
6335 info->imm.value = val;
6336 skip_whitespace (str);
6337 if (skip_past_comma (&str))
6338 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6339 else
6340 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6341 break;
6342
6343 case AARCH64_OPND_SVE_PATTERN:
6344 po_enum_or_fail (aarch64_sve_pattern_array);
6345 info->imm.value = val;
6346 break;
6347
6348 case AARCH64_OPND_SVE_PATTERN_SCALED:
6349 po_enum_or_fail (aarch64_sve_pattern_array);
6350 info->imm.value = val;
6351 if (skip_past_comma (&str)
6352 && !parse_shift (&str, info, SHIFTED_MUL))
6353 goto failure;
6354 if (!info->shifter.operator_present)
6355 {
6356 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6357 info->shifter.kind = AARCH64_MOD_MUL;
6358 info->shifter.amount = 1;
6359 }
6360 break;
6361
6362 case AARCH64_OPND_SVE_PRFOP:
6363 po_enum_or_fail (aarch64_sve_prfop_array);
6364 info->imm.value = val;
6365 break;
6366
6367 case AARCH64_OPND_UIMM7:
6368 po_imm_or_fail (0, 127);
6369 info->imm.value = val;
6370 break;
6371
6372 case AARCH64_OPND_IDX:
6373 case AARCH64_OPND_MASK:
6374 case AARCH64_OPND_BIT_NUM:
6375 case AARCH64_OPND_IMMR:
6376 case AARCH64_OPND_IMMS:
6377 po_imm_or_fail (0, 63);
6378 info->imm.value = val;
6379 break;
6380
6381 case AARCH64_OPND_IMM0:
6382 po_imm_nc_or_fail ();
6383 if (val != 0)
6384 {
6385 set_fatal_syntax_error (_("immediate zero expected"));
6386 goto failure;
6387 }
6388 info->imm.value = 0;
6389 break;
6390
6391 case AARCH64_OPND_FPIMM0:
6392 {
6393 int qfloat;
6394 bool res1 = false, res2 = false;
6395 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6396 it is probably not worth the effort to support it. */
6397 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6398 imm_reg_type))
6399 && (error_p ()
6400 || !(res2 = parse_constant_immediate (&str, &val,
6401 imm_reg_type))))
6402 goto failure;
6403 if ((res1 && qfloat == 0) || (res2 && val == 0))
6404 {
6405 info->imm.value = 0;
6406 info->imm.is_fp = 1;
6407 break;
6408 }
6409 set_fatal_syntax_error (_("immediate zero expected"));
6410 goto failure;
6411 }
6412
6413 case AARCH64_OPND_IMM_MOV:
6414 {
6415 char *saved = str;
6416 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6417 reg_name_p (str, REG_TYPE_VN))
6418 goto failure;
6419 str = saved;
6420 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6421 GE_OPT_PREFIX, REJECT_ABSENT,
6422 NORMAL_RESOLUTION));
6423 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6424 later. fix_mov_imm_insn will try to determine a machine
6425 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6426 message if the immediate cannot be moved by a single
6427 instruction. */
6428 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6429 inst.base.operands[i].skip = 1;
6430 }
6431 break;
6432
6433 case AARCH64_OPND_SIMD_IMM:
6434 case AARCH64_OPND_SIMD_IMM_SFT:
6435 if (! parse_big_immediate (&str, &val, imm_reg_type))
6436 goto failure;
6437 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6438 /* addr_off_p */ 0,
6439 /* need_libopcodes_p */ 1,
6440 /* skip_p */ 1);
6441 /* Parse shift.
6442 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6443 shift, we don't check it here; we leave the checking to
6444 the libopcodes (operand_general_constraint_met_p). By
6445 doing this, we achieve better diagnostics. */
6446 if (skip_past_comma (&str)
6447 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6448 goto failure;
6449 if (!info->shifter.operator_present
6450 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6451 {
6452 /* Default to LSL if not present. Libopcodes prefers shifter
6453 kind to be explicit. */
6454 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6455 info->shifter.kind = AARCH64_MOD_LSL;
6456 }
6457 break;
6458
6459 case AARCH64_OPND_FPIMM:
6460 case AARCH64_OPND_SIMD_FPIMM:
6461 case AARCH64_OPND_SVE_FPIMM8:
6462 {
6463 int qfloat;
6464 bool dp_p;
6465
6466 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6467 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6468 || !aarch64_imm_float_p (qfloat))
6469 {
6470 if (!error_p ())
6471 set_fatal_syntax_error (_("invalid floating-point"
6472 " constant"));
6473 goto failure;
6474 }
6475 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6476 inst.base.operands[i].imm.is_fp = 1;
6477 }
6478 break;
6479
6480 case AARCH64_OPND_SVE_I1_HALF_ONE:
6481 case AARCH64_OPND_SVE_I1_HALF_TWO:
6482 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6483 {
6484 int qfloat;
6485 bool dp_p;
6486
6487 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6488 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6489 {
6490 if (!error_p ())
6491 set_fatal_syntax_error (_("invalid floating-point"
6492 " constant"));
6493 goto failure;
6494 }
6495 inst.base.operands[i].imm.value = qfloat;
6496 inst.base.operands[i].imm.is_fp = 1;
6497 }
6498 break;
6499
6500 case AARCH64_OPND_LIMM:
6501 po_misc_or_fail (parse_shifter_operand (&str, info,
6502 SHIFTED_LOGIC_IMM));
6503 if (info->shifter.operator_present)
6504 {
6505 set_fatal_syntax_error
6506 (_("shift not allowed for bitmask immediate"));
6507 goto failure;
6508 }
6509 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6510 /* addr_off_p */ 0,
6511 /* need_libopcodes_p */ 1,
6512 /* skip_p */ 1);
6513 break;
6514
6515 case AARCH64_OPND_AIMM:
6516 if (opcode->op == OP_ADD)
6517 /* ADD may have relocation types. */
6518 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6519 SHIFTED_ARITH_IMM));
6520 else
6521 po_misc_or_fail (parse_shifter_operand (&str, info,
6522 SHIFTED_ARITH_IMM));
6523 switch (inst.reloc.type)
6524 {
6525 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6526 info->shifter.amount = 12;
6527 break;
6528 case BFD_RELOC_UNUSED:
6529 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6530 if (info->shifter.kind != AARCH64_MOD_NONE)
6531 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6532 inst.reloc.pc_rel = 0;
6533 break;
6534 default:
6535 break;
6536 }
6537 info->imm.value = 0;
6538 if (!info->shifter.operator_present)
6539 {
6540 /* Default to LSL if not present. Libopcodes prefers shifter
6541 kind to be explicit. */
6542 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6543 info->shifter.kind = AARCH64_MOD_LSL;
6544 }
6545 break;
6546
6547 case AARCH64_OPND_HALF:
6548 {
6549 /* #<imm16> or relocation. */
6550 int internal_fixup_p;
6551 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6552 if (internal_fixup_p)
6553 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6554 skip_whitespace (str);
6555 if (skip_past_comma (&str))
6556 {
6557 /* {, LSL #<shift>} */
6558 if (! aarch64_gas_internal_fixup_p ())
6559 {
6560 set_fatal_syntax_error (_("can't mix relocation modifier "
6561 "with explicit shift"));
6562 goto failure;
6563 }
6564 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6565 }
6566 else
6567 inst.base.operands[i].shifter.amount = 0;
6568 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6569 inst.base.operands[i].imm.value = 0;
6570 if (! process_movw_reloc_info ())
6571 goto failure;
6572 }
6573 break;
6574
6575 case AARCH64_OPND_EXCEPTION:
6576 case AARCH64_OPND_UNDEFINED:
6577 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6578 imm_reg_type));
6579 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6580 /* addr_off_p */ 0,
6581 /* need_libopcodes_p */ 0,
6582 /* skip_p */ 1);
6583 break;
6584
6585 case AARCH64_OPND_NZCV:
6586 {
6587 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6588 if (nzcv != NULL)
6589 {
6590 str += 4;
6591 info->imm.value = nzcv->value;
6592 break;
6593 }
6594 po_imm_or_fail (0, 15);
6595 info->imm.value = val;
6596 }
6597 break;
6598
6599 case AARCH64_OPND_COND:
6600 case AARCH64_OPND_COND1:
6601 {
6602 char *start = str;
6603 do
6604 str++;
6605 while (ISALPHA (*str));
6606 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6607 if (info->cond == NULL)
6608 {
6609 set_syntax_error (_("invalid condition"));
6610 goto failure;
6611 }
6612 else if (operands[i] == AARCH64_OPND_COND1
6613 && (info->cond->value & 0xe) == 0xe)
6614 {
6615 /* Do not allow AL or NV. */
6616 set_default_error ();
6617 goto failure;
6618 }
6619 }
6620 break;
6621
6622 case AARCH64_OPND_ADDR_ADRP:
6623 po_misc_or_fail (parse_adrp (&str));
6624 /* Clear the value as operand needs to be relocated. */
6625 info->imm.value = 0;
6626 break;
6627
6628 case AARCH64_OPND_ADDR_PCREL14:
6629 case AARCH64_OPND_ADDR_PCREL19:
6630 case AARCH64_OPND_ADDR_PCREL21:
6631 case AARCH64_OPND_ADDR_PCREL26:
6632 po_misc_or_fail (parse_address (&str, info));
6633 if (!info->addr.pcrel)
6634 {
6635 set_syntax_error (_("invalid pc-relative address"));
6636 goto failure;
6637 }
6638 if (inst.gen_lit_pool
6639 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6640 {
6641 /* Only permit "=value" in the literal load instructions.
6642 The literal will be generated by programmer_friendly_fixup. */
6643 set_syntax_error (_("invalid use of \"=immediate\""));
6644 goto failure;
6645 }
6646 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6647 {
6648 set_syntax_error (_("unrecognized relocation suffix"));
6649 goto failure;
6650 }
6651 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6652 {
6653 info->imm.value = inst.reloc.exp.X_add_number;
6654 inst.reloc.type = BFD_RELOC_UNUSED;
6655 }
6656 else
6657 {
6658 info->imm.value = 0;
6659 if (inst.reloc.type == BFD_RELOC_UNUSED)
6660 switch (opcode->iclass)
6661 {
6662 case compbranch:
6663 case condbranch:
6664 /* e.g. CBZ or B.COND */
6665 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6666 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6667 break;
6668 case testbranch:
6669 /* e.g. TBZ */
6670 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6671 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6672 break;
6673 case branch_imm:
6674 /* e.g. B or BL */
6675 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6676 inst.reloc.type =
6677 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6678 : BFD_RELOC_AARCH64_JUMP26;
6679 break;
6680 case loadlit:
6681 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6682 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6683 break;
6684 case pcreladdr:
6685 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6686 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6687 break;
6688 default:
6689 gas_assert (0);
6690 abort ();
6691 }
6692 inst.reloc.pc_rel = 1;
6693 }
6694 break;
6695
6696 case AARCH64_OPND_ADDR_SIMPLE:
6697 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6698 {
6699 /* [<Xn|SP>{, #<simm>}] */
6700 char *start = str;
6701 /* First use the normal address-parsing routines, to get
6702 the usual syntax errors. */
6703 po_misc_or_fail (parse_address (&str, info));
6704 if (info->addr.pcrel || info->addr.offset.is_reg
6705 || !info->addr.preind || info->addr.postind
6706 || info->addr.writeback)
6707 {
6708 set_syntax_error (_("invalid addressing mode"));
6709 goto failure;
6710 }
6711
6712 /* Then retry, matching the specific syntax of these addresses. */
6713 str = start;
6714 po_char_or_fail ('[');
6715 po_reg_or_fail (REG_TYPE_R64_SP);
6716 /* Accept optional ", #0". */
6717 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6718 && skip_past_char (&str, ','))
6719 {
6720 skip_past_char (&str, '#');
6721 if (! skip_past_char (&str, '0'))
6722 {
6723 set_fatal_syntax_error
6724 (_("the optional immediate offset can only be 0"));
6725 goto failure;
6726 }
6727 }
6728 po_char_or_fail (']');
6729 break;
6730 }
6731
6732 case AARCH64_OPND_ADDR_REGOFF:
6733 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6734 po_misc_or_fail (parse_address (&str, info));
6735 regoff_addr:
6736 if (info->addr.pcrel || !info->addr.offset.is_reg
6737 || !info->addr.preind || info->addr.postind
6738 || info->addr.writeback)
6739 {
6740 set_syntax_error (_("invalid addressing mode"));
6741 goto failure;
6742 }
6743 if (!info->shifter.operator_present)
6744 {
6745 /* Default to LSL if not present. Libopcodes prefers shifter
6746 kind to be explicit. */
6747 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6748 info->shifter.kind = AARCH64_MOD_LSL;
6749 }
6750 /* Qualifier to be deduced by libopcodes. */
6751 break;
6752
6753 case AARCH64_OPND_ADDR_SIMM7:
6754 po_misc_or_fail (parse_address (&str, info));
6755 if (info->addr.pcrel || info->addr.offset.is_reg
6756 || (!info->addr.preind && !info->addr.postind))
6757 {
6758 set_syntax_error (_("invalid addressing mode"));
6759 goto failure;
6760 }
6761 if (inst.reloc.type != BFD_RELOC_UNUSED)
6762 {
6763 set_syntax_error (_("relocation not allowed"));
6764 goto failure;
6765 }
6766 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6767 /* addr_off_p */ 1,
6768 /* need_libopcodes_p */ 1,
6769 /* skip_p */ 0);
6770 break;
6771
6772 case AARCH64_OPND_ADDR_SIMM9:
6773 case AARCH64_OPND_ADDR_SIMM9_2:
6774 case AARCH64_OPND_ADDR_SIMM11:
6775 case AARCH64_OPND_ADDR_SIMM13:
6776 po_misc_or_fail (parse_address (&str, info));
6777 if (info->addr.pcrel || info->addr.offset.is_reg
6778 || (!info->addr.preind && !info->addr.postind)
6779 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6780 && info->addr.writeback))
6781 {
6782 set_syntax_error (_("invalid addressing mode"));
6783 goto failure;
6784 }
6785 if (inst.reloc.type != BFD_RELOC_UNUSED)
6786 {
6787 set_syntax_error (_("relocation not allowed"));
6788 goto failure;
6789 }
6790 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6791 /* addr_off_p */ 1,
6792 /* need_libopcodes_p */ 1,
6793 /* skip_p */ 0);
6794 break;
6795
6796 case AARCH64_OPND_ADDR_SIMM10:
6797 case AARCH64_OPND_ADDR_OFFSET:
6798 po_misc_or_fail (parse_address (&str, info));
6799 if (info->addr.pcrel || info->addr.offset.is_reg
6800 || !info->addr.preind || info->addr.postind)
6801 {
6802 set_syntax_error (_("invalid addressing mode"));
6803 goto failure;
6804 }
6805 if (inst.reloc.type != BFD_RELOC_UNUSED)
6806 {
6807 set_syntax_error (_("relocation not allowed"));
6808 goto failure;
6809 }
6810 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6811 /* addr_off_p */ 1,
6812 /* need_libopcodes_p */ 1,
6813 /* skip_p */ 0);
6814 break;
6815
6816 case AARCH64_OPND_ADDR_UIMM12:
6817 po_misc_or_fail (parse_address (&str, info));
6818 if (info->addr.pcrel || info->addr.offset.is_reg
6819 || !info->addr.preind || info->addr.writeback)
6820 {
6821 set_syntax_error (_("invalid addressing mode"));
6822 goto failure;
6823 }
6824 if (inst.reloc.type == BFD_RELOC_UNUSED)
6825 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6826 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6827 || (inst.reloc.type
6828 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6829 || (inst.reloc.type
6830 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6831 || (inst.reloc.type
6832 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6833 || (inst.reloc.type
6834 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6835 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6836 /* Leave qualifier to be determined by libopcodes. */
6837 break;
6838
6839 case AARCH64_OPND_SIMD_ADDR_POST:
6840 /* [<Xn|SP>], <Xm|#<amount>> */
6841 po_misc_or_fail (parse_address (&str, info));
6842 if (!info->addr.postind || !info->addr.writeback)
6843 {
6844 set_syntax_error (_("invalid addressing mode"));
6845 goto failure;
6846 }
6847 if (!info->addr.offset.is_reg)
6848 {
6849 if (inst.reloc.exp.X_op == O_constant)
6850 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6851 else
6852 {
6853 set_fatal_syntax_error
6854 (_("writeback value must be an immediate constant"));
6855 goto failure;
6856 }
6857 }
6858 /* No qualifier. */
6859 break;
6860
6861 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6862 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
6863 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6864 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6865 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6866 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6867 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6868 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6869 case AARCH64_OPND_SVE_ADDR_RI_U6:
6870 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6871 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6872 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6873 /* [X<n>{, #imm, MUL VL}]
6874 [X<n>{, #imm}]
6875 but recognizing SVE registers. */
6876 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6877 &offset_qualifier));
6878 if (base_qualifier != AARCH64_OPND_QLF_X)
6879 {
6880 set_syntax_error (_("invalid addressing mode"));
6881 goto failure;
6882 }
6883 sve_regimm:
6884 if (info->addr.pcrel || info->addr.offset.is_reg
6885 || !info->addr.preind || info->addr.writeback)
6886 {
6887 set_syntax_error (_("invalid addressing mode"));
6888 goto failure;
6889 }
6890 if (inst.reloc.type != BFD_RELOC_UNUSED
6891 || inst.reloc.exp.X_op != O_constant)
6892 {
6893 /* Make sure this has priority over
6894 "invalid addressing mode". */
6895 set_fatal_syntax_error (_("constant offset required"));
6896 goto failure;
6897 }
6898 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6899 break;
6900
6901 case AARCH64_OPND_SVE_ADDR_R:
6902 /* [<Xn|SP>{, <R><m>}]
6903 but recognizing SVE registers. */
6904 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6905 &offset_qualifier));
6906 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6907 {
6908 offset_qualifier = AARCH64_OPND_QLF_X;
6909 info->addr.offset.is_reg = 1;
6910 info->addr.offset.regno = 31;
6911 }
6912 else if (base_qualifier != AARCH64_OPND_QLF_X
6913 || offset_qualifier != AARCH64_OPND_QLF_X)
6914 {
6915 set_syntax_error (_("invalid addressing mode"));
6916 goto failure;
6917 }
6918 goto regoff_addr;
6919
6920 case AARCH64_OPND_SVE_ADDR_RR:
6921 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6922 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6923 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6924 case AARCH64_OPND_SVE_ADDR_RX:
6925 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6926 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6927 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6928 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6929 but recognizing SVE registers. */
6930 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6931 &offset_qualifier));
6932 if (base_qualifier != AARCH64_OPND_QLF_X
6933 || offset_qualifier != AARCH64_OPND_QLF_X)
6934 {
6935 set_syntax_error (_("invalid addressing mode"));
6936 goto failure;
6937 }
6938 goto regoff_addr;
6939
6940 case AARCH64_OPND_SVE_ADDR_RZ:
6941 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6942 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6943 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6944 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6945 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6946 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6947 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6948 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6949 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6950 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6951 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6952 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6953 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6954 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6955 &offset_qualifier));
6956 if (base_qualifier != AARCH64_OPND_QLF_X
6957 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6958 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6959 {
6960 set_syntax_error (_("invalid addressing mode"));
6961 goto failure;
6962 }
6963 info->qualifier = offset_qualifier;
6964 goto regoff_addr;
6965
6966 case AARCH64_OPND_SVE_ADDR_ZX:
6967 /* [Zn.<T>{, <Xm>}]. */
6968 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6969 &offset_qualifier));
6970 /* Things to check:
6971 base_qualifier either S_S or S_D
6972 offset_qualifier must be X
6973 */
6974 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6975 && base_qualifier != AARCH64_OPND_QLF_S_D)
6976 || offset_qualifier != AARCH64_OPND_QLF_X)
6977 {
6978 set_syntax_error (_("invalid addressing mode"));
6979 goto failure;
6980 }
6981 info->qualifier = base_qualifier;
6982 if (!info->addr.offset.is_reg || info->addr.pcrel
6983 || !info->addr.preind || info->addr.writeback
6984 || info->shifter.operator_present != 0)
6985 {
6986 set_syntax_error (_("invalid addressing mode"));
6987 goto failure;
6988 }
6989 info->shifter.kind = AARCH64_MOD_LSL;
6990 break;
6991
6992
6993 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6994 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6995 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6996 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6997 /* [Z<n>.<T>{, #imm}] */
6998 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6999 &offset_qualifier));
7000 if (base_qualifier != AARCH64_OPND_QLF_S_S
7001 && base_qualifier != AARCH64_OPND_QLF_S_D)
7002 {
7003 set_syntax_error (_("invalid addressing mode"));
7004 goto failure;
7005 }
7006 info->qualifier = base_qualifier;
7007 goto sve_regimm;
7008
7009 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7010 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7011 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7012 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7013 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7014
7015 We don't reject:
7016
7017 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7018
7019 here since we get better error messages by leaving it to
7020 the qualifier checking routines. */
7021 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7022 &offset_qualifier));
7023 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7024 && base_qualifier != AARCH64_OPND_QLF_S_D)
7025 || offset_qualifier != base_qualifier)
7026 {
7027 set_syntax_error (_("invalid addressing mode"));
7028 goto failure;
7029 }
7030 info->qualifier = base_qualifier;
7031 goto regoff_addr;
7032
7033 case AARCH64_OPND_SYSREG:
7034 {
7035 uint32_t sysreg_flags;
7036 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7037 &sysreg_flags)) == PARSE_FAIL)
7038 {
7039 set_syntax_error (_("unknown or missing system register name"));
7040 goto failure;
7041 }
7042 inst.base.operands[i].sysreg.value = val;
7043 inst.base.operands[i].sysreg.flags = sysreg_flags;
7044 break;
7045 }
7046
7047 case AARCH64_OPND_PSTATEFIELD:
7048 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
7049 == PARSE_FAIL)
7050 {
7051 set_syntax_error (_("unknown or missing PSTATE field name"));
7052 goto failure;
7053 }
7054 inst.base.operands[i].pstatefield = val;
7055 break;
7056
7057 case AARCH64_OPND_SYSREG_IC:
7058 inst.base.operands[i].sysins_op =
7059 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7060 goto sys_reg_ins;
7061
7062 case AARCH64_OPND_SYSREG_DC:
7063 inst.base.operands[i].sysins_op =
7064 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7065 goto sys_reg_ins;
7066
7067 case AARCH64_OPND_SYSREG_AT:
7068 inst.base.operands[i].sysins_op =
7069 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7070 goto sys_reg_ins;
7071
7072 case AARCH64_OPND_SYSREG_SR:
7073 inst.base.operands[i].sysins_op =
7074 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7075 goto sys_reg_ins;
7076
7077 case AARCH64_OPND_SYSREG_TLBI:
7078 inst.base.operands[i].sysins_op =
7079 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7080 sys_reg_ins:
7081 if (inst.base.operands[i].sysins_op == NULL)
7082 {
7083 set_fatal_syntax_error ( _("unknown or missing operation name"));
7084 goto failure;
7085 }
7086 break;
7087
7088 case AARCH64_OPND_BARRIER:
7089 case AARCH64_OPND_BARRIER_ISB:
7090 val = parse_barrier (&str);
7091 if (val != PARSE_FAIL
7092 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7093 {
7094 /* ISB only accepts options name 'sy'. */
7095 set_syntax_error
7096 (_("the specified option is not accepted in ISB"));
7097 /* Turn off backtrack as this optional operand is present. */
7098 backtrack_pos = 0;
7099 goto failure;
7100 }
7101 if (val != PARSE_FAIL
7102 && operands[i] == AARCH64_OPND_BARRIER)
7103 {
7104 /* Regular barriers accept options CRm (C0-C15).
7105 DSB nXS barrier variant accepts values > 15. */
7106 if (val < 0 || val > 15)
7107 {
7108 set_syntax_error (_("the specified option is not accepted in DSB"));
7109 goto failure;
7110 }
7111 }
7112 /* This is an extension to accept a 0..15 immediate. */
7113 if (val == PARSE_FAIL)
7114 po_imm_or_fail (0, 15);
7115 info->barrier = aarch64_barrier_options + val;
7116 break;
7117
7118 case AARCH64_OPND_BARRIER_DSB_NXS:
7119 val = parse_barrier (&str);
7120 if (val != PARSE_FAIL)
7121 {
7122 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7123 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7124 {
7125 set_syntax_error (_("the specified option is not accepted in DSB"));
7126 /* Turn off backtrack as this optional operand is present. */
7127 backtrack_pos = 0;
7128 goto failure;
7129 }
7130 }
7131 else
7132 {
7133 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7134 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7135 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7136 goto failure;
7137 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7138 {
7139 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7140 goto failure;
7141 }
7142 }
7143 /* Option index is encoded as 2-bit value in val<3:2>. */
7144 val = (val >> 2) - 4;
7145 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7146 break;
7147
7148 case AARCH64_OPND_PRFOP:
7149 val = parse_pldop (&str);
7150 /* This is an extension to accept a 0..31 immediate. */
7151 if (val == PARSE_FAIL)
7152 po_imm_or_fail (0, 31);
7153 inst.base.operands[i].prfop = aarch64_prfops + val;
7154 break;
7155
7156 case AARCH64_OPND_BARRIER_PSB:
7157 val = parse_barrier_psb (&str, &(info->hint_option));
7158 if (val == PARSE_FAIL)
7159 goto failure;
7160 break;
7161
7162 case AARCH64_OPND_BTI_TARGET:
7163 val = parse_bti_operand (&str, &(info->hint_option));
7164 if (val == PARSE_FAIL)
7165 goto failure;
7166 break;
7167
7168 case AARCH64_OPND_SME_ZAda_2b:
7169 case AARCH64_OPND_SME_ZAda_3b:
7170 val = parse_sme_zada_operand (&str, &qualifier);
7171 if (val == PARSE_FAIL)
7172 goto failure;
7173 info->reg.regno = val;
7174 info->qualifier = qualifier;
7175 break;
7176
7177 case AARCH64_OPND_SME_ZA_HV_idx_src:
7178 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7179 {
7180 enum sme_hv_slice vector_indicator;
7181 int vector_select_register;
7182 int imm;
7183 val = parse_sme_za_hv_tiles_operand (&str, &vector_indicator,
7184 &vector_select_register,
7185 &imm,
7186 &qualifier);
7187 if (val == PARSE_FAIL)
7188 goto failure;
7189 info->za_tile_vector.regno = val;
7190 info->za_tile_vector.index.regno = vector_select_register;
7191 info->za_tile_vector.index.imm = imm;
7192 info->za_tile_vector.v = vector_indicator;
7193 info->qualifier = qualifier;
7194 break;
7195 }
7196
7197 default:
7198 as_fatal (_("unhandled operand code %d"), operands[i]);
7199 }
7200
7201 /* If we get here, this operand was successfully parsed. */
7202 inst.base.operands[i].present = 1;
7203 continue;
7204
7205 failure:
7206 /* The parse routine should already have set the error, but in case
7207 not, set a default one here. */
7208 if (! error_p ())
7209 set_default_error ();
7210
7211 if (! backtrack_pos)
7212 goto parse_operands_return;
7213
7214 {
7215 /* We reach here because this operand is marked as optional, and
7216 either no operand was supplied or the operand was supplied but it
7217 was syntactically incorrect. In the latter case we report an
7218 error. In the former case we perform a few more checks before
7219 dropping through to the code to insert the default operand. */
7220
7221 char *tmp = backtrack_pos;
7222 char endchar = END_OF_INSN;
7223
7224 if (i != (aarch64_num_of_operands (opcode) - 1))
7225 endchar = ',';
7226 skip_past_char (&tmp, ',');
7227
7228 if (*tmp != endchar)
7229 /* The user has supplied an operand in the wrong format. */
7230 goto parse_operands_return;
7231
7232 /* Make sure there is not a comma before the optional operand.
7233 For example the fifth operand of 'sys' is optional:
7234
7235 sys #0,c0,c0,#0, <--- wrong
7236 sys #0,c0,c0,#0 <--- correct. */
7237 if (comma_skipped_p && i && endchar == END_OF_INSN)
7238 {
7239 set_fatal_syntax_error
7240 (_("unexpected comma before the omitted optional operand"));
7241 goto parse_operands_return;
7242 }
7243 }
7244
7245 /* Reaching here means we are dealing with an optional operand that is
7246 omitted from the assembly line. */
7247 gas_assert (optional_operand_p (opcode, i));
7248 info->present = 0;
7249 process_omitted_operand (operands[i], opcode, i, info);
7250
7251 /* Try again, skipping the optional operand at backtrack_pos. */
7252 str = backtrack_pos;
7253 backtrack_pos = 0;
7254
7255 /* Clear any error record after the omitted optional operand has been
7256 successfully handled. */
7257 clear_error ();
7258 }
7259
7260 /* Check if we have parsed all the operands. */
7261 if (*str != '\0' && ! error_p ())
7262 {
7263 /* Set I to the index of the last present operand; this is
7264 for the purpose of diagnostics. */
7265 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7266 ;
7267 set_fatal_syntax_error
7268 (_("unexpected characters following instruction"));
7269 }
7270
7271 parse_operands_return:
7272
7273 if (error_p ())
7274 {
7275 DEBUG_TRACE ("parsing FAIL: %s - %s",
7276 operand_mismatch_kind_names[get_error_kind ()],
7277 get_error_message ());
7278 /* Record the operand error properly; this is useful when there
7279 are multiple instruction templates for a mnemonic name, so that
7280 later on, we can select the error that most closely describes
7281 the problem. */
7282 record_operand_error (opcode, i, get_error_kind (),
7283 get_error_message ());
7284 return false;
7285 }
7286 else
7287 {
7288 DEBUG_TRACE ("parsing SUCCESS");
7289 return true;
7290 }
7291 }
7292
/* It does some fix-up to provide some programmer friendly feature while
   keeping the libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register restricts the bit number to 0..31; reject anything
	     larger before silently widening the qualifier.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  /* The instruction only has an X-register encoding; the bit number
	     alone determines how it is disassembled.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW loads a 32-bit literal even though the destination is an
	     X register, so override the qualifier-derived size.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	/* Quietly narrow Xd to Wd so libopcodes sees the preferred form.  */
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      /* No fix-up needed for the remaining instruction classes.  */
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
7400
/* Check for loads and stores that will cause unpredictable behavior.
   This only issues as_warn diagnostics; it never rejects the instruction.
   STR is the original source line, used verbatim in the warnings.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Single-register forms: operand 0 is the transfer register and
	 operand 1 is the address.
	 Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Pair forms: operands 0 and 1 are the transfer registers and
	 operand 2 is the address.
	 Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 Bit 22 of the encoding distinguishes loads from stores here.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      /* Exclusive forms: for stores, operand 0 is the status register Rs,
	 operand 1 (and 2 for pairs) the transfer register(s), and the last
	 operand the base; for loads operand 0 is the transfer register.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  /* Bit 22 set: a load-exclusive; bit 21 set: a pair variant.  */
	  if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      /* Other instruction classes have no unpredictable-operand hazards
	 that we diagnose here.  */
      break;
    }
}
7496
7497 static void
7498 force_automatic_sequence_close (void)
7499 {
7500 if (now_instr_sequence.instr)
7501 {
7502 as_warn (_("previous `%s' sequence has not been closed"),
7503 now_instr_sequence.instr->opcode->name);
7504 init_insn_sequence (NULL, &now_instr_sequence);
7505 }
7506 }
7507
7508 /* A wrapper function to interface with libopcodes on encoding and
7509 record the error message if there is any.
7510
7511 Return TRUE on success; otherwise return FALSE. */
7512
7513 static bool
7514 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7515 aarch64_insn *code)
7516 {
7517 aarch64_operand_error error_info;
7518 memset (&error_info, '\0', sizeof (error_info));
7519 error_info.kind = AARCH64_OPDE_NIL;
7520 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7521 && !error_info.non_fatal)
7522 return true;
7523
7524 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7525 record_operand_error_info (opcode, &error_info);
7526 return error_info.non_fatal;
7527 }
7528
7529 #ifdef DEBUG_AARCH64
7530 static inline void
7531 dump_opcode_operands (const aarch64_opcode *opcode)
7532 {
7533 int i = 0;
7534 while (opcode->operands[i] != AARCH64_OPND_NIL)
7535 {
7536 aarch64_verbose ("\t\t opnd%d: %s", i,
7537 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7538 ? aarch64_get_operand_name (opcode->operands[i])
7539 : aarch64_get_operand_desc (opcode->operands[i]));
7540 ++i;
7541 }
7542 }
7543 #endif /* DEBUG_AARCH64 */
7544
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  const aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Look up the mnemonic; P is advanced past it on success.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Reset the global instruction record, but keep the condition code that
     opcode_lookup derived from the mnemonic (e.g. for b.cond).  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up, and encode; all three must succeed before the
	 instruction can be output.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; restore the instruction state before trying
	 the next entry with the same mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
7684
7685 /* Various frobbings of labels and their addresses. */
7686
/* Hook run at the start of each assembly line: forget the label seen on
   the previous line so it only associates with what followed it.  */
void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
7692
/* Hook run when the label SYM is defined: remember it as the most
   recently seen label and report it to the DWARF2 line machinery.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
7700
/* Section frobbing hook (SEC itself is unused here).  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
7707
/* Return non-zero if the input marks the start of a data-in-code region.
   NOTE(review): this looks at input_line_pointer + 1 for "data:", so it
   presumably expects to sit on the character before the marker (e.g. the
   '/' of "/data:") -- confirm against the TC_* caller.  On a match it
   rewrites the current character to '/', skips past "data:" and
   NUL-terminates there.  */
int
aarch64_data_in_code (void)
{
  if (startswith (input_line_pointer + 1, "data:"))
    {
      *input_line_pointer = '/';
      input_line_pointer += 5;
      *input_line_pointer = 0;
      return 1;
    }

  return 0;
}
7721
/* Canonicalize NAME in place by stripping a trailing "/data" suffix,
   if present.  Returns NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t length = strlen (name);

  if (length > 5)
    {
      char *suffix = name + (length - 5);
      if (strcmp (suffix, "/data") == 0)
	*suffix = '\0';
    }

  return name;
}
7732 \f
7733 /* Table of all register names defined by default. The user can
7734 define additional names with .req. Note that all register names
7735 should appear in both upper and lowercase variants. Some registers
7736 also have mixed-case names. */
7737
7738 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
7739 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
7740 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
7741 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
7742 #define REGSET16(p,t) \
7743 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
7744 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
7745 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
7746 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
7747 #define REGSET16S(p,s,t) \
7748 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
7749 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
7750 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
7751 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
7752 #define REGSET31(p,t) \
7753 REGSET16(p, t), \
7754 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
7755 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
7756 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
7757 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
7758 #define REGSET(p,t) \
7759 REGSET31(p,t), REGNUM(p,31,t)
7760
7761 /* These go into aarch64_reg_hsh hash-table. */
7762 static const reg_entry reg_names[] = {
7763 /* Integer registers. */
7764 REGSET31 (x, R_64), REGSET31 (X, R_64),
7765 REGSET31 (w, R_32), REGSET31 (W, R_32),
7766
7767 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
7768 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
7769 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
7770 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
7771 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
7772 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
7773
7774 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
7775 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
7776
7777 /* Floating-point single precision registers. */
7778 REGSET (s, FP_S), REGSET (S, FP_S),
7779
7780 /* Floating-point double precision registers. */
7781 REGSET (d, FP_D), REGSET (D, FP_D),
7782
7783 /* Floating-point half precision registers. */
7784 REGSET (h, FP_H), REGSET (H, FP_H),
7785
7786 /* Floating-point byte precision registers. */
7787 REGSET (b, FP_B), REGSET (B, FP_B),
7788
7789 /* Floating-point quad precision registers. */
7790 REGSET (q, FP_Q), REGSET (Q, FP_Q),
7791
7792 /* FP/SIMD registers. */
7793 REGSET (v, VN), REGSET (V, VN),
7794
7795 /* SVE vector registers. */
7796 REGSET (z, ZN), REGSET (Z, ZN),
7797
7798 /* SVE predicate registers. */
7799 REGSET16 (p, PN), REGSET16 (P, PN),
7800
7801 /* SME ZA tile registers. */
7802 REGSET16 (za, ZA), REGSET16 (ZA, ZA),
7803
7804 /* SME ZA tile registers (horizontal slice). */
7805 REGSET16S (za, h, ZAH), REGSET16S (ZA, H, ZAH),
7806
7807 /* SME ZA tile registers (vertical slice). */
7808 REGSET16S (za, v, ZAV), REGSET16S (ZA, V, ZAV)
7809 };
7810
7811 #undef REGDEF
7812 #undef REGDEF_ALIAS
7813 #undef REGNUM
7814 #undef REGSET16
7815 #undef REGSET31
7816 #undef REGSET
7817
/* Names for the 4-bit NZCV condition-flag immediate.  An uppercase
   letter in the name means that flag bit is set (1), lowercase means
   clear (0).  B packs N, Z, C, V into bits 3..0 respectively.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7855 \f
7856 /* MD interface: bits in the object file. */
7857
7858 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7859 for use in the a.out file, and stores them in the array pointed to by buf.
7860 This knows about the endian-ness of the target machine and does
7861 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7862 2 (short) and 4 (long) Floating numbers are put out as a series of
7863 LITTLENUMS (shorts, here at least). */
7864
7865 void
7866 md_number_to_chars (char *buf, valueT val, int n)
7867 {
7868 if (target_big_endian)
7869 number_to_chars_bigendian (buf, val, n);
7870 else
7871 number_to_chars_littleendian (buf, val, n);
7872 }
7873
7874 /* MD interface: Sections. */
7875
/* Estimate the size of a frag before relaxing.  Assume everything fits in
   4 bytes (matching the fixed AArch64 instruction width).  */

int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  fragp->fr_var = 4;
  return 4;
}
7885
/* Round up a section size to the appropriate boundary.  No extra
   rounding is needed here, so the size is returned unchanged.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  return size;
}
7893
7894 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7895 of an rs_align_code fragment.
7896
7897 Here we fill the frag with the appropriate info for padding the
7898 output stream. The resulting frag will consist of a fixed (fr_fix)
7899 and of a repeating (fr_var) part.
7900
7901 The fixed content is always emitted before the repeating content and
7902 these two parts are used as follows in constructing the output:
7903 - the fixed part will be used to align to a valid instruction word
7904 boundary, in case that we start at a misaligned address; as no
7905 executable instruction can live at the misaligned location, we
7906 simply fill with zeros;
7907 - the variable part will be used to cover the remaining padding and
7908 we fill using the AArch64 NOP instruction.
7909
7910 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7911 enough storage space for up to 3 bytes for padding the back to a valid
7912 instruction alignment and exactly 4 bytes to store the NOP pattern. */
7913
7914 void
7915 aarch64_handle_align (fragS * fragP)
7916 {
7917 /* NOP = d503201f */
7918 /* AArch64 instructions are always little-endian. */
7919 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
7920
7921 int bytes, fix, noop_size;
7922 char *p;
7923
7924 if (fragP->fr_type != rs_align_code)
7925 return;
7926
7927 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
7928 p = fragP->fr_literal + fragP->fr_fix;
7929
7930 #ifdef OBJ_ELF
7931 gas_assert (fragP->tc_frag_data.recorded);
7932 #endif
7933
7934 noop_size = sizeof (aarch64_noop);
7935
7936 fix = bytes & (noop_size - 1);
7937 if (fix)
7938 {
7939 #ifdef OBJ_ELF
7940 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
7941 #endif
7942 memset (p, 0, fix);
7943 p += fix;
7944 fragP->fr_fix += fix;
7945 }
7946
7947 if (noop_size)
7948 memcpy (p, aarch64_noop, noop_size);
7949 fragP->fr_var = noop_size;
7950 }
7951
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Pick the mapping state ($d data / $x insn) matching the frag type.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
7997 \f
/* Initialize the DWARF-2 unwind information for this procedure:
   the CFA starts at SP with offset 0.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
#endif /* OBJ_ELF */
8006
8007 /* Convert REGNAME to a DWARF-2 register number. */
8008
8009 int
8010 tc_aarch64_regname_to_dw2regnum (char *regname)
8011 {
8012 const reg_entry *reg = parse_reg (&regname);
8013 if (reg == NULL)
8014 return -1;
8015
8016 switch (reg->type)
8017 {
8018 case REG_TYPE_SP_32:
8019 case REG_TYPE_SP_64:
8020 case REG_TYPE_R_32:
8021 case REG_TYPE_R_64:
8022 return reg->number;
8023
8024 case REG_TYPE_FP_B:
8025 case REG_TYPE_FP_H:
8026 case REG_TYPE_FP_S:
8027 case REG_TYPE_FP_D:
8028 case REG_TYPE_FP_Q:
8029 return reg->number + 64;
8030
8031 default:
8032 break;
8033 }
8034 return -1;
8035 }
8036
/* Implement DWARF2_ADDR_SIZE: the byte size of an address in the
   DWARF debug info.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
8048
8049 /* MD interface: Symbol and relocation handling. */
8050
8051 /* Return the address within the segment that a PC-relative fixup is
8052 relative to. For AArch64 PC-relative fixups applied to instructions
8053 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8054
8055 long
8056 md_pcrel_from_section (fixS * fixP, segT seg)
8057 {
8058 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8059
8060 /* If this is pc-relative and we are going to emit a relocation
8061 then we just want to put out any pipeline compensation that the linker
8062 will need. Otherwise we want to use the calculated base. */
8063 if (fixP->fx_pcrel
8064 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8065 || aarch64_force_relocation (fixP)))
8066 base = 0;
8067
8068 /* AArch64 should be consistent for all pc-relative relocations. */
8069 return base + AARCH64_PCREL_OFFSET;
8070 }
8071
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character check before the full string comparison.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
8097
8098 /* Return non-zero if the indicated VALUE has overflowed the maximum
8099 range expressible by a unsigned number with the indicated number of
8100 BITS. */
8101
8102 static bool
8103 unsigned_overflow (valueT value, unsigned bits)
8104 {
8105 valueT lim;
8106 if (bits >= sizeof (valueT) * 8)
8107 return false;
8108 lim = (valueT) 1 << bits;
8109 return (value >= lim);
8110 }
8111
8112
8113 /* Return non-zero if the indicated VALUE has overflowed the maximum
8114 range expressible by an signed number with the indicated number of
8115 BITS. */
8116
8117 static bool
8118 signed_overflow (offsetT value, unsigned bits)
8119 {
8120 offsetT lim;
8121 if (bits >= sizeof (offsetT) * 8)
8122 return false;
8123 lim = (offsetT) 1 << (bits - 1);
8124 return (value < -lim || value >= lim);
8125 }
8126
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled (LDUR/STUR) twin.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the unscaled opcode; failure means the offset does
     not fit the signed 9-bit form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8189
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN is the negated wide form;
	 see the MOVZ/MOVN/ORR preference order documented above).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
8250
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate; EXCEPTION operands go through the SVC
	 immediate encoder, UDF immediates are inserted directly.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6  Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6  Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6  Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6  Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2   221111111111
		  1  098|7654|3 2   109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12       Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12       Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12       Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12       Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up; fall back to the unscaled (LDUR/STUR) form when
	 the scaled immediate does not fit.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8429
8430 /* Apply a fixup (fixP) to segment data, once it has been determined
8431 by our caller that we have all the info we need to fix it up.
8432
8433 Parameter valP is the pointer to the value of the bits. */
8434
8435 void
8436 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
8437 {
8438 offsetT value = *valP;
8439 uint32_t insn;
8440 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
8441 int scale;
8442 unsigned flags = fixP->fx_addnumber;
8443
8444 DEBUG_TRACE ("\n\n");
8445 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
8446 DEBUG_TRACE ("Enter md_apply_fix");
8447
8448 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
8449
8450 /* Note whether this will delete the relocation. */
8451
8452 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
8453 fixP->fx_done = 1;
8454
8455 /* Process the relocations. */
8456 switch (fixP->fx_r_type)
8457 {
8458 case BFD_RELOC_NONE:
8459 /* This will need to go in the object file. */
8460 fixP->fx_done = 0;
8461 break;
8462
8463 case BFD_RELOC_8:
8464 case BFD_RELOC_8_PCREL:
8465 if (fixP->fx_done || !seg->use_rela_p)
8466 md_number_to_chars (buf, value, 1);
8467 break;
8468
8469 case BFD_RELOC_16:
8470 case BFD_RELOC_16_PCREL:
8471 if (fixP->fx_done || !seg->use_rela_p)
8472 md_number_to_chars (buf, value, 2);
8473 break;
8474
8475 case BFD_RELOC_32:
8476 case BFD_RELOC_32_PCREL:
8477 if (fixP->fx_done || !seg->use_rela_p)
8478 md_number_to_chars (buf, value, 4);
8479 break;
8480
8481 case BFD_RELOC_64:
8482 case BFD_RELOC_64_PCREL:
8483 if (fixP->fx_done || !seg->use_rela_p)
8484 md_number_to_chars (buf, value, 8);
8485 break;
8486
8487 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
8488 /* We claim that these fixups have been processed here, even if
8489 in fact we generate an error because we do not have a reloc
8490 for them, so tc_gen_reloc() will reject them. */
8491 fixP->fx_done = 1;
8492 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
8493 {
8494 as_bad_where (fixP->fx_file, fixP->fx_line,
8495 _("undefined symbol %s used as an immediate value"),
8496 S_GET_NAME (fixP->fx_addsy));
8497 goto apply_fix_return;
8498 }
8499 fix_insn (fixP, flags, value);
8500 break;
8501
8502 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
8503 if (fixP->fx_done || !seg->use_rela_p)
8504 {
8505 if (value & 3)
8506 as_bad_where (fixP->fx_file, fixP->fx_line,
8507 _("pc-relative load offset not word aligned"));
8508 if (signed_overflow (value, 21))
8509 as_bad_where (fixP->fx_file, fixP->fx_line,
8510 _("pc-relative load offset out of range"));
8511 insn = get_aarch64_insn (buf);
8512 insn |= encode_ld_lit_ofs_19 (value >> 2);
8513 put_aarch64_insn (buf, insn);
8514 }
8515 break;
8516
8517 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
8518 if (fixP->fx_done || !seg->use_rela_p)
8519 {
8520 if (signed_overflow (value, 21))
8521 as_bad_where (fixP->fx_file, fixP->fx_line,
8522 _("pc-relative address offset out of range"));
8523 insn = get_aarch64_insn (buf);
8524 insn |= encode_adr_imm (value);
8525 put_aarch64_insn (buf, insn);
8526 }
8527 break;
8528
8529 case BFD_RELOC_AARCH64_BRANCH19:
8530 if (fixP->fx_done || !seg->use_rela_p)
8531 {
8532 if (value & 3)
8533 as_bad_where (fixP->fx_file, fixP->fx_line,
8534 _("conditional branch target not word aligned"));
8535 if (signed_overflow (value, 21))
8536 as_bad_where (fixP->fx_file, fixP->fx_line,
8537 _("conditional branch out of range"));
8538 insn = get_aarch64_insn (buf);
8539 insn |= encode_cond_branch_ofs_19 (value >> 2);
8540 put_aarch64_insn (buf, insn);
8541 }
8542 break;
8543
8544 case BFD_RELOC_AARCH64_TSTBR14:
8545 if (fixP->fx_done || !seg->use_rela_p)
8546 {
8547 if (value & 3)
8548 as_bad_where (fixP->fx_file, fixP->fx_line,
8549 _("conditional branch target not word aligned"));
8550 if (signed_overflow (value, 16))
8551 as_bad_where (fixP->fx_file, fixP->fx_line,
8552 _("conditional branch out of range"));
8553 insn = get_aarch64_insn (buf);
8554 insn |= encode_tst_branch_ofs_14 (value >> 2);
8555 put_aarch64_insn (buf, insn);
8556 }
8557 break;
8558
8559 case BFD_RELOC_AARCH64_CALL26:
8560 case BFD_RELOC_AARCH64_JUMP26:
8561 if (fixP->fx_done || !seg->use_rela_p)
8562 {
8563 if (value & 3)
8564 as_bad_where (fixP->fx_file, fixP->fx_line,
8565 _("branch target not word aligned"));
8566 if (signed_overflow (value, 28))
8567 as_bad_where (fixP->fx_file, fixP->fx_line,
8568 _("branch out of range"));
8569 insn = get_aarch64_insn (buf);
8570 insn |= encode_branch_ofs_26 (value >> 2);
8571 put_aarch64_insn (buf, insn);
8572 }
8573 break;
8574
8575 case BFD_RELOC_AARCH64_MOVW_G0:
8576 case BFD_RELOC_AARCH64_MOVW_G0_NC:
8577 case BFD_RELOC_AARCH64_MOVW_G0_S:
8578 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
8579 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8580 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
8581 scale = 0;
8582 goto movw_common;
8583 case BFD_RELOC_AARCH64_MOVW_G1:
8584 case BFD_RELOC_AARCH64_MOVW_G1_NC:
8585 case BFD_RELOC_AARCH64_MOVW_G1_S:
8586 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8587 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8588 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
8589 scale = 16;
8590 goto movw_common;
8591 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8592 scale = 0;
8593 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8594 /* Should always be exported to object file, see
8595 aarch64_force_relocation(). */
8596 gas_assert (!fixP->fx_done);
8597 gas_assert (seg->use_rela_p);
8598 goto movw_common;
8599 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8600 scale = 16;
8601 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8602 /* Should always be exported to object file, see
8603 aarch64_force_relocation(). */
8604 gas_assert (!fixP->fx_done);
8605 gas_assert (seg->use_rela_p);
8606 goto movw_common;
8607 case BFD_RELOC_AARCH64_MOVW_G2:
8608 case BFD_RELOC_AARCH64_MOVW_G2_NC:
8609 case BFD_RELOC_AARCH64_MOVW_G2_S:
8610 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8611 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
8612 scale = 32;
8613 goto movw_common;
8614 case BFD_RELOC_AARCH64_MOVW_G3:
8615 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
8616 scale = 48;
8617 movw_common:
8618 if (fixP->fx_done || !seg->use_rela_p)
8619 {
8620 insn = get_aarch64_insn (buf);
8621
8622 if (!fixP->fx_done)
8623 {
8624 /* REL signed addend must fit in 16 bits */
8625 if (signed_overflow (value, 16))
8626 as_bad_where (fixP->fx_file, fixP->fx_line,
8627 _("offset out of range"));
8628 }
8629 else
8630 {
8631 /* Check for overflow and scale. */
8632 switch (fixP->fx_r_type)
8633 {
8634 case BFD_RELOC_AARCH64_MOVW_G0:
8635 case BFD_RELOC_AARCH64_MOVW_G1:
8636 case BFD_RELOC_AARCH64_MOVW_G2:
8637 case BFD_RELOC_AARCH64_MOVW_G3:
8638 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8639 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8640 if (unsigned_overflow (value, scale + 16))
8641 as_bad_where (fixP->fx_file, fixP->fx_line,
8642 _("unsigned value out of range"));
8643 break;
8644 case BFD_RELOC_AARCH64_MOVW_G0_S:
8645 case BFD_RELOC_AARCH64_MOVW_G1_S:
8646 case BFD_RELOC_AARCH64_MOVW_G2_S:
8647 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8648 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8649 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8650 /* NOTE: We can only come here with movz or movn. */
8651 if (signed_overflow (value, scale + 16))
8652 as_bad_where (fixP->fx_file, fixP->fx_line,
8653 _("signed value out of range"));
8654 if (value < 0)
8655 {
8656 /* Force use of MOVN. */
8657 value = ~value;
8658 insn = reencode_movzn_to_movn (insn);
8659 }
8660 else
8661 {
8662 /* Force use of MOVZ. */
8663 insn = reencode_movzn_to_movz (insn);
8664 }
8665 break;
8666 default:
8667 /* Unchecked relocations. */
8668 break;
8669 }
8670 value >>= scale;
8671 }
8672
8673 /* Insert value into MOVN/MOVZ/MOVK instruction. */
8674 insn |= encode_movw_imm (value & 0xffff);
8675
8676 put_aarch64_insn (buf, insn);
8677 }
8678 break;
8679
8680 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8681 fixP->fx_r_type = (ilp32_p
8682 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
8683 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
8684 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8685 /* Should always be exported to object file, see
8686 aarch64_force_relocation(). */
8687 gas_assert (!fixP->fx_done);
8688 gas_assert (seg->use_rela_p);
8689 break;
8690
8691 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8692 fixP->fx_r_type = (ilp32_p
8693 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
8694 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
8695 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8696 /* Should always be exported to object file, see
8697 aarch64_force_relocation(). */
8698 gas_assert (!fixP->fx_done);
8699 gas_assert (seg->use_rela_p);
8700 break;
8701
8702 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8703 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8704 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8705 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8706 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8707 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8708 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8709 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8710 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8711 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8712 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8713 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8714 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8715 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8716 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8717 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8718 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8719 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8720 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8721 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8722 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8723 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8724 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8725 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8726 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8727 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8728 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8729 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8730 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8731 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8732 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8733 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8734 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8735 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8736 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8737 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8738 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8739 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8740 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8741 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8742 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8743 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8744 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8745 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8746 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8747 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8748 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8749 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8750 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8751 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8752 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8753 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8754 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8755 /* Should always be exported to object file, see
8756 aarch64_force_relocation(). */
8757 gas_assert (!fixP->fx_done);
8758 gas_assert (seg->use_rela_p);
8759 break;
8760
8761 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8762 /* Should always be exported to object file, see
8763 aarch64_force_relocation(). */
8764 fixP->fx_r_type = (ilp32_p
8765 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
8766 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
8767 gas_assert (!fixP->fx_done);
8768 gas_assert (seg->use_rela_p);
8769 break;
8770
8771 case BFD_RELOC_AARCH64_ADD_LO12:
8772 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8773 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8774 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8775 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8776 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8777 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8778 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8779 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8780 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8781 case BFD_RELOC_AARCH64_LDST128_LO12:
8782 case BFD_RELOC_AARCH64_LDST16_LO12:
8783 case BFD_RELOC_AARCH64_LDST32_LO12:
8784 case BFD_RELOC_AARCH64_LDST64_LO12:
8785 case BFD_RELOC_AARCH64_LDST8_LO12:
8786 /* Should always be exported to object file, see
8787 aarch64_force_relocation(). */
8788 gas_assert (!fixP->fx_done);
8789 gas_assert (seg->use_rela_p);
8790 break;
8791
8792 case BFD_RELOC_AARCH64_TLSDESC_ADD:
8793 case BFD_RELOC_AARCH64_TLSDESC_CALL:
8794 case BFD_RELOC_AARCH64_TLSDESC_LDR:
8795 break;
8796
8797 case BFD_RELOC_UNUSED:
8798 /* An error will already have been reported. */
8799 break;
8800
8801 default:
8802 as_bad_where (fixP->fx_file, fixP->fx_line,
8803 _("unexpected %s fixup"),
8804 bfd_get_reloc_code_name (fixP->fx_r_type));
8805 break;
8806 }
8807
8808 apply_fix_return:
8809 /* Free the allocated the struct aarch64_inst.
8810 N.B. currently there are very limited number of fix-up types actually use
8811 this field, so the impact on the performance should be minimal . */
8812 free (fixP->tc_fix_data.inst);
8813
8814 return;
8815 }
8816
8817 /* Translate internal representation of relocation info to BFD target
8818 format. */
8819
8820 arelent *
8821 tc_gen_reloc (asection * section, fixS * fixp)
8822 {
8823 arelent *reloc;
8824 bfd_reloc_code_real_type code;
8825
8826 reloc = XNEW (arelent);
8827
8828 reloc->sym_ptr_ptr = XNEW (asymbol *);
8829 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8830 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8831
8832 if (fixp->fx_pcrel)
8833 {
8834 if (section->use_rela_p)
8835 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8836 else
8837 fixp->fx_offset = reloc->address;
8838 }
8839 reloc->addend = fixp->fx_offset;
8840
8841 code = fixp->fx_r_type;
8842 switch (code)
8843 {
8844 case BFD_RELOC_16:
8845 if (fixp->fx_pcrel)
8846 code = BFD_RELOC_16_PCREL;
8847 break;
8848
8849 case BFD_RELOC_32:
8850 if (fixp->fx_pcrel)
8851 code = BFD_RELOC_32_PCREL;
8852 break;
8853
8854 case BFD_RELOC_64:
8855 if (fixp->fx_pcrel)
8856 code = BFD_RELOC_64_PCREL;
8857 break;
8858
8859 default:
8860 break;
8861 }
8862
8863 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8864 if (reloc->howto == NULL)
8865 {
8866 as_bad_where (fixp->fx_file, fixp->fx_line,
8867 _
8868 ("cannot represent %s relocation in this object file format"),
8869 bfd_get_reloc_code_name (code));
8870 return NULL;
8871 }
8872
8873 return reloc;
8874 }
8875
8876 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8877
8878 void
8879 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8880 {
8881 bfd_reloc_code_real_type type;
8882 int pcrel = 0;
8883
8884 /* Pick a reloc.
8885 FIXME: @@ Should look at CPU word size. */
8886 switch (size)
8887 {
8888 case 1:
8889 type = BFD_RELOC_8;
8890 break;
8891 case 2:
8892 type = BFD_RELOC_16;
8893 break;
8894 case 4:
8895 type = BFD_RELOC_32;
8896 break;
8897 case 8:
8898 type = BFD_RELOC_64;
8899 break;
8900 default:
8901 as_bad (_("cannot do %u-byte relocation"), size);
8902 type = BFD_RELOC_UNUSED;
8903 break;
8904 }
8905
8906 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8907 }
8908
8909 #ifdef OBJ_ELF
8910
8911 /* Implement md_after_parse_args. This is the earliest time we need to decide
8912 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8913
8914 void
8915 aarch64_after_parse_args (void)
8916 {
8917 if (aarch64_abi != AARCH64_ABI_NONE)
8918 return;
8919
8920 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8921 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8922 aarch64_abi = AARCH64_ABI_ILP32;
8923 else
8924 aarch64_abi = AARCH64_ABI_LP64;
8925 }
8926
8927 const char *
8928 elf64_aarch64_target_format (void)
8929 {
8930 #ifdef TE_CLOUDABI
8931 /* FIXME: What to do for ilp32_p ? */
8932 if (target_big_endian)
8933 return "elf64-bigaarch64-cloudabi";
8934 else
8935 return "elf64-littleaarch64-cloudabi";
8936 #else
8937 if (target_big_endian)
8938 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8939 else
8940 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8941 #endif
8942 }
8943
/* Target hook run for each symbol before it is written out; defer to
   the generic ELF symbol frobbing.  *PUNTP is set by the callee when
   the symbol should not be emitted.  */

void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8949 #endif
8950
8951 /* MD interface: Finalization. */
8952
8953 /* A good place to do this, although this was probably not intended
8954 for this kind of use. We need to dump the literal pool before
8955 references are made to a null symbol pointer. */
8956
8957 void
8958 aarch64_cleanup (void)
8959 {
8960 literal_pool *pool;
8961
8962 for (pool = list_of_pools; pool; pool = pool->next)
8963 {
8964 /* Put it at the end of the relevant section. */
8965 subseg_set (pool->section, pool->sub_section);
8966 s_ltorg (0);
8967 }
8968 }
8969
8970 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections from
   aarch64_adjust_symtab; ABFD and DUMMY are unused.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections without frag chains.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Only the last mapping symbol of a frag can be redundant: it is
	 the one that may sit exactly at the next frag's start.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits on the boundary; walk forward over empty frags to see
	 whether a later mapping symbol makes it redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9034 #endif
9035
/* Adjust the symbol table.  Target hook run before the symbol table is
   written out: first drop redundant mapping symbols, then apply the
   generic ELF adjustments.  No-op for non-ELF objects.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9048
/* Insert KEY -> VALUE into TABLE.  The trailing 0 argument means an
   existing entry for KEY is kept rather than replaced.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9054
/* Like checked_hash_insert, but for system register names: assert the
   name fits within the fixed buffer the parser later copies it into.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9061
9062 static void
9063 fill_instruction_hash_table (void)
9064 {
9065 const aarch64_opcode *opcode = aarch64_opcode_table;
9066
9067 while (opcode->name != NULL)
9068 {
9069 templates *templ, *new_templ;
9070 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9071
9072 new_templ = XNEW (templates);
9073 new_templ->opcode = opcode;
9074 new_templ->next = NULL;
9075
9076 if (!templ)
9077 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9078 else
9079 {
9080 new_templ->next = templ->next;
9081 templ->next = new_templ;
9082 }
9083 ++opcode;
9084 }
9085 }
9086
9087 static inline void
9088 convert_to_upper (char *dst, const char *src, size_t num)
9089 {
9090 unsigned int i;
9091 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9092 *dst = TOUPPER (*src);
9093 *dst = '\0';
9094 }
9095
9096 /* Assume STR point to a lower-case string, allocate, convert and return
9097 the corresponding upper-case string. */
9098 static inline const char*
9099 get_upper_str (const char *str)
9100 {
9101 char *ret;
9102 size_t len = strlen (str);
9103 ret = XNEWVEC (char, len + 1);
9104 convert_to_upper (ret, str, len);
9105 return ret;
9106 }
9107
9108 /* MD interface: Initialization. */
9109
/* MD interface hook: one-shot assembler initialization.  Create and
   populate every operand/opcode lookup table and pin down the CPU
   variant and BFD machine before any input line is parsed.  The order
   matters: tables must exist before the population loops run, and the
   CPU selection below respects -mcpu over -march over the default.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create all the hash tables up front.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers, PSTATE fields and the IC/DC/AT/TLBI/SR operation
     names: each maps name -> its opcodes table entry.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* Register and NZCV-operand names.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* The remaining operand tables are hashed both as-is and upper-cased
     so lookups accept either spelling.  N.B. the get_upper_str ()
     buffers are intentionally never freed; they must live as long as
     the hash tables do.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options: -mcpu wins
     over -march; with neither, fall back to the configured default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
9268
9269 /* Command line processing. */
9270
/* Short options recognized by this back end: -m<...>.  */
const char *md_shortopts = "m:";

/* Endianness selection options.  Define only the option(s) this
   configuration can honour: both for bi-endian targets, otherwise
   just the one matching the configured byte order.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* Long options table, consumed by the generic option parser.  */
struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9295
/* Simple boolean command-line options: each entry names an option and
   the integer variable/value it sets when seen.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9318
/* An entry in the -mcpu table: CPU name plus the feature set it
   enables.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
9327
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Each entry maps a -mcpu= name to its base
   architecture plus the optional features that CPU implements.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9487
/* An entry in the -march table: architecture name and the feature set
   it implies.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {NULL, AARCH64_ARCH_NONE}
};
9510
/* ISA extensions.  An entry maps a +ext/+noext name to the feature
   bits it controls plus the features it depends on; the dependency
   field drives the transitive enable/disable closures below.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
   AARCH64_ARCH_NONE},
  {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
   AARCH64_ARCH_NONE},
  {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
   AARCH64_ARCH_NONE},
  {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
   AARCH64_ARCH_NONE},
  {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
   AARCH64_ARCH_NONE},
  {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
   AARCH64_ARCH_NONE},
  {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP
		    | AARCH64_FEATURE_F16, 0)},
  {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
   AARCH64_ARCH_NONE},
  {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_F16
		    | AARCH64_FEATURE_SIMD
		    | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
   AARCH64_ARCH_NONE},
  {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_F16
		    | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
   AARCH64_ARCH_NONE},
  {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
   AARCH64_ARCH_NONE},
  {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
   AARCH64_ARCH_NONE},
  {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
   AARCH64_ARCH_NONE},
  {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
   AARCH64_ARCH_NONE},
  {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
   AARCH64_ARCH_NONE},
  {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
   AARCH64_ARCH_NONE},
  {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
   AARCH64_ARCH_NONE},
  {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
   AARCH64_ARCH_NONE},
  {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
   AARCH64_ARCH_NONE},
  {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme", AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64", AARCH64_FEATURE (AARCH64_FEATURE_SME_F64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME
		    | AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-i64", AARCH64_FEATURE (AARCH64_FEATURE_SME_I64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME
		    | AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"bf16", AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
   AARCH64_ARCH_NONE},
  {"i8mm", AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
   AARCH64_ARCH_NONE},
  {"f32mm", AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm", AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64", AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
   AARCH64_ARCH_NONE},
  {"flagm", AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
   AARCH64_ARCH_NONE},
  {"pauth", AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
   AARCH64_ARCH_NONE},
  {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
9616
/* Table entry describing one long command-line option (e.g. -mcpu=,
   -march=).  OPTION is matched as a prefix of the option text; the
   remainder of the argument is handed to FUNC for decoding.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
9624
9625 /* Transitive closure of features depending on set. */
9626 static aarch64_feature_set
9627 aarch64_feature_disable_set (aarch64_feature_set set)
9628 {
9629 const struct aarch64_option_cpu_value_table *opt;
9630 aarch64_feature_set prev = 0;
9631
9632 while (prev != set) {
9633 prev = set;
9634 for (opt = aarch64_features; opt->name != NULL; opt++)
9635 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9636 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9637 }
9638 return set;
9639 }
9640
9641 /* Transitive closure of dependencies of set. */
9642 static aarch64_feature_set
9643 aarch64_feature_enable_set (aarch64_feature_set set)
9644 {
9645 const struct aarch64_option_cpu_value_table *opt;
9646 aarch64_feature_set prev = 0;
9647
9648 while (prev != set) {
9649 prev = set;
9650 for (opt = aarch64_features; opt->name != NULL; opt++)
9651 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9652 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9653 }
9654 return set;
9655 }
9656
9657 static int
9658 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9659 bool ext_only)
9660 {
9661 /* We insist on extensions being added before being removed. We achieve
9662 this by using the ADDING_VALUE variable to indicate whether we are
9663 adding an extension (1) or removing it (0) and only allowing it to
9664 change in the order -1 -> 1 -> 0. */
9665 int adding_value = -1;
9666 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9667
9668 /* Copy the feature set, so that we can modify it. */
9669 *ext_set = **opt_p;
9670 *opt_p = ext_set;
9671
9672 while (str != NULL && *str != 0)
9673 {
9674 const struct aarch64_option_cpu_value_table *opt;
9675 const char *ext = NULL;
9676 int optlen;
9677
9678 if (!ext_only)
9679 {
9680 if (*str != '+')
9681 {
9682 as_bad (_("invalid architectural extension"));
9683 return 0;
9684 }
9685
9686 ext = strchr (++str, '+');
9687 }
9688
9689 if (ext != NULL)
9690 optlen = ext - str;
9691 else
9692 optlen = strlen (str);
9693
9694 if (optlen >= 2 && startswith (str, "no"))
9695 {
9696 if (adding_value != 0)
9697 adding_value = 0;
9698 optlen -= 2;
9699 str += 2;
9700 }
9701 else if (optlen > 0)
9702 {
9703 if (adding_value == -1)
9704 adding_value = 1;
9705 else if (adding_value != 1)
9706 {
9707 as_bad (_("must specify extensions to add before specifying "
9708 "those to remove"));
9709 return false;
9710 }
9711 }
9712
9713 if (optlen == 0)
9714 {
9715 as_bad (_("missing architectural extension"));
9716 return 0;
9717 }
9718
9719 gas_assert (adding_value != -1);
9720
9721 for (opt = aarch64_features; opt->name != NULL; opt++)
9722 if (strncmp (opt->name, str, optlen) == 0)
9723 {
9724 aarch64_feature_set set;
9725
9726 /* Add or remove the extension. */
9727 if (adding_value)
9728 {
9729 set = aarch64_feature_enable_set (opt->value);
9730 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9731 }
9732 else
9733 {
9734 set = aarch64_feature_disable_set (opt->value);
9735 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9736 }
9737 break;
9738 }
9739
9740 if (opt->name == NULL)
9741 {
9742 as_bad (_("unknown architectural extension `%s'"), str);
9743 return 0;
9744 }
9745
9746 str = ext;
9747 };
9748
9749 return 1;
9750 }
9751
9752 static int
9753 aarch64_parse_cpu (const char *str)
9754 {
9755 const struct aarch64_cpu_option_table *opt;
9756 const char *ext = strchr (str, '+');
9757 size_t optlen;
9758
9759 if (ext != NULL)
9760 optlen = ext - str;
9761 else
9762 optlen = strlen (str);
9763
9764 if (optlen == 0)
9765 {
9766 as_bad (_("missing cpu name `%s'"), str);
9767 return 0;
9768 }
9769
9770 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9771 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9772 {
9773 mcpu_cpu_opt = &opt->value;
9774 if (ext != NULL)
9775 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
9776
9777 return 1;
9778 }
9779
9780 as_bad (_("unknown cpu `%s'"), str);
9781 return 0;
9782 }
9783
9784 static int
9785 aarch64_parse_arch (const char *str)
9786 {
9787 const struct aarch64_arch_option_table *opt;
9788 const char *ext = strchr (str, '+');
9789 size_t optlen;
9790
9791 if (ext != NULL)
9792 optlen = ext - str;
9793 else
9794 optlen = strlen (str);
9795
9796 if (optlen == 0)
9797 {
9798 as_bad (_("missing architecture name `%s'"), str);
9799 return 0;
9800 }
9801
9802 for (opt = aarch64_archs; opt->name != NULL; opt++)
9803 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9804 {
9805 march_cpu_opt = &opt->value;
9806 if (ext != NULL)
9807 return aarch64_parse_features (ext, &march_cpu_opt, false);
9808
9809 return 1;
9810 }
9811
9812 as_bad (_("unknown architecture `%s'\n"), str);
9813 return 0;
9814 }
9815
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name as written after -mabi=.  */
  enum aarch64_abi_type value;	/* Corresponding internal ABI enumerator.  */
};

/* ABIs accepted by the -mabi= option; searched linearly by
   aarch64_parse_abi.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
9827
9828 static int
9829 aarch64_parse_abi (const char *str)
9830 {
9831 unsigned int i;
9832
9833 if (str[0] == '\0')
9834 {
9835 as_bad (_("missing abi name `%s'"), str);
9836 return 0;
9837 }
9838
9839 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9840 if (strcmp (str, aarch64_abis[i].name) == 0)
9841 {
9842 aarch64_abi = aarch64_abis[i].value;
9843 return 1;
9844 }
9845
9846 as_bad (_("unknown abi `%s'\n"), str);
9847 return 0;
9848 }
9849
9850 static struct aarch64_long_option_table aarch64_long_opts[] = {
9851 #ifdef OBJ_ELF
9852 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
9853 aarch64_parse_abi, NULL},
9854 #endif /* OBJ_ELF */
9855 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
9856 aarch64_parse_cpu, NULL},
9857 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
9858 aarch64_parse_arch, NULL},
9859 {NULL, NULL, 0, NULL}
9860 };
9861
9862 int
9863 md_parse_option (int c, const char *arg)
9864 {
9865 struct aarch64_option_table *opt;
9866 struct aarch64_long_option_table *lopt;
9867
9868 switch (c)
9869 {
9870 #ifdef OPTION_EB
9871 case OPTION_EB:
9872 target_big_endian = 1;
9873 break;
9874 #endif
9875
9876 #ifdef OPTION_EL
9877 case OPTION_EL:
9878 target_big_endian = 0;
9879 break;
9880 #endif
9881
9882 case 'a':
9883 /* Listing option. Just ignore these, we don't support additional
9884 ones. */
9885 return 0;
9886
9887 default:
9888 for (opt = aarch64_opts; opt->option != NULL; opt++)
9889 {
9890 if (c == opt->option[0]
9891 && ((arg == NULL && opt->option[1] == 0)
9892 || streq (arg, opt->option + 1)))
9893 {
9894 /* If the option is deprecated, tell the user. */
9895 if (opt->deprecated != NULL)
9896 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
9897 arg ? arg : "", _(opt->deprecated));
9898
9899 if (opt->var != NULL)
9900 *opt->var = opt->value;
9901
9902 return 1;
9903 }
9904 }
9905
9906 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9907 {
9908 /* These options are expected to have an argument. */
9909 if (c == lopt->option[0]
9910 && arg != NULL
9911 && startswith (arg, lopt->option + 1))
9912 {
9913 /* If the option is deprecated, tell the user. */
9914 if (lopt->deprecated != NULL)
9915 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
9916 _(lopt->deprecated));
9917
9918 /* Call the sup-option parser. */
9919 return lopt->func (arg + strlen (lopt->option) - 1);
9920 }
9921 }
9922
9923 return 0;
9924 }
9925
9926 return 1;
9927 }
9928
9929 void
9930 md_show_usage (FILE * fp)
9931 {
9932 struct aarch64_option_table *opt;
9933 struct aarch64_long_option_table *lopt;
9934
9935 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9936
9937 for (opt = aarch64_opts; opt->option != NULL; opt++)
9938 if (opt->help != NULL)
9939 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9940
9941 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9942 if (lopt->help != NULL)
9943 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
9944
9945 #ifdef OPTION_EB
9946 fprintf (fp, _("\
9947 -EB assemble code for a big-endian cpu\n"));
9948 #endif
9949
9950 #ifdef OPTION_EL
9951 fprintf (fp, _("\
9952 -EL assemble code for a little-endian cpu\n"));
9953 #endif
9954 }
9955
9956 /* Parse a .cpu directive. */
9957
9958 static void
9959 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9960 {
9961 const struct aarch64_cpu_option_table *opt;
9962 char saved_char;
9963 char *name;
9964 char *ext;
9965 size_t optlen;
9966
9967 name = input_line_pointer;
9968 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9969 input_line_pointer++;
9970 saved_char = *input_line_pointer;
9971 *input_line_pointer = 0;
9972
9973 ext = strchr (name, '+');
9974
9975 if (ext != NULL)
9976 optlen = ext - name;
9977 else
9978 optlen = strlen (name);
9979
9980 /* Skip the first "all" entry. */
9981 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9982 if (strlen (opt->name) == optlen
9983 && strncmp (name, opt->name, optlen) == 0)
9984 {
9985 mcpu_cpu_opt = &opt->value;
9986 if (ext != NULL)
9987 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
9988 return;
9989
9990 cpu_variant = *mcpu_cpu_opt;
9991
9992 *input_line_pointer = saved_char;
9993 demand_empty_rest_of_line ();
9994 return;
9995 }
9996 as_bad (_("unknown cpu `%s'"), name);
9997 *input_line_pointer = saved_char;
9998 ignore_rest_of_line ();
9999 }
10000
10001
10002 /* Parse a .arch directive. */
10003
10004 static void
10005 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10006 {
10007 const struct aarch64_arch_option_table *opt;
10008 char saved_char;
10009 char *name;
10010 char *ext;
10011 size_t optlen;
10012
10013 name = input_line_pointer;
10014 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10015 input_line_pointer++;
10016 saved_char = *input_line_pointer;
10017 *input_line_pointer = 0;
10018
10019 ext = strchr (name, '+');
10020
10021 if (ext != NULL)
10022 optlen = ext - name;
10023 else
10024 optlen = strlen (name);
10025
10026 /* Skip the first "all" entry. */
10027 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10028 if (strlen (opt->name) == optlen
10029 && strncmp (name, opt->name, optlen) == 0)
10030 {
10031 mcpu_cpu_opt = &opt->value;
10032 if (ext != NULL)
10033 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10034 return;
10035
10036 cpu_variant = *mcpu_cpu_opt;
10037
10038 *input_line_pointer = saved_char;
10039 demand_empty_rest_of_line ();
10040 return;
10041 }
10042
10043 as_bad (_("unknown architecture `%s'\n"), name);
10044 *input_line_pointer = saved_char;
10045 ignore_rest_of_line ();
10046 }
10047
10048 /* Parse a .arch_extension directive. */
10049
10050 static void
10051 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10052 {
10053 char saved_char;
10054 char *ext = input_line_pointer;;
10055
10056 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10057 input_line_pointer++;
10058 saved_char = *input_line_pointer;
10059 *input_line_pointer = 0;
10060
10061 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10062 return;
10063
10064 cpu_variant = *mcpu_cpu_opt;
10065
10066 *input_line_pointer = saved_char;
10067 demand_empty_rest_of_line ();
10068 }
10069
/* Copy symbol information.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  /* Propagate the AArch64-specific symbol flag word from SRC to DEST.  */
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10077
10078 #ifdef OBJ_ELF
10079 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10080 This is needed so AArch64 specific st_other values can be independently
10081 specified for an IFUNC resolver (that is called by the dynamic linker)
10082 and the symbol it resolves (aliased to the resolver). In particular,
10083 if a function symbol has special st_other value set via directives,
10084 then attaching an IFUNC resolver to that symbol should not override
10085 the st_other setting. Requiring the directive on the IFUNC resolver
10086 symbol would be unexpected and problematic in C code, where the two
10087 symbols appear as two independent function declarations. */
10088
10089 void
10090 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10091 {
10092 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10093 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10094 if (srcelf->size)
10095 {
10096 if (destelf->size == NULL)
10097 destelf->size = XNEW (expressionS);
10098 *destelf->size = *srcelf->size;
10099 }
10100 else
10101 {
10102 free (destelf->size);
10103 destelf->size = NULL;
10104 }
10105 S_SET_SIZE (dest, S_GET_SIZE (src));
10106 }
10107 #endif