1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2021 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* SME horizontal or vertical slice indicator, encoded in "V".
103 Values:
104 0 - Horizontal
105 1 - Vertical
106 */
107 enum sme_hv_slice
108 {
109 HV_horizontal = 0,
110 HV_vertical = 1
111 };
112
113 /* Bits for DEFINED field in vector_type_el. */
114 #define NTA_HASTYPE 1
115 #define NTA_HASINDEX 2
116 #define NTA_HASVARWIDTH 4
117
118 struct vector_type_el
119 {
120 enum vector_el_type type;
121 unsigned char defined;
122 unsigned width;
123 int64_t index;
124 };
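/* For illustration only: parsing an operand such as "v0.4s[2]" (see
   parse_typed_reg below) would leave behind a vector_type_el of roughly

     { .type = NT_s, .defined = NTA_HASTYPE | NTA_HASINDEX,
       .width = 4, .index = 2 }

   This is a sketch of the expected field values, not code that runs
   here.  */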
125
126 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
127
128 struct reloc
129 {
130 bfd_reloc_code_real_type type;
131 expressionS exp;
132 int pc_rel;
133 enum aarch64_opnd opnd;
134 uint32_t flags;
135 unsigned need_libopcodes_p : 1;
136 };
137
138 struct aarch64_instruction
139 {
140 /* libopcodes structure for instruction intermediate representation. */
141 aarch64_inst base;
142 /* Record assembly errors found during the parsing. */
143 struct
144 {
145 enum aarch64_operand_error_kind kind;
146 const char *error;
147 } parsing_error;
148 /* The condition that appears in the assembly line. */
149 int cond;
150 /* Relocation information (including the GAS internal fixup). */
151 struct reloc reloc;
152 /* Need to generate an immediate in the literal pool. */
153 unsigned gen_lit_pool : 1;
154 };
155
156 typedef struct aarch64_instruction aarch64_instruction;
157
158 static aarch64_instruction inst;
159
160 static bool parse_operands (char *, const aarch64_opcode *);
161 static bool programmer_friendly_fixup (aarch64_instruction *);
162
163 #ifdef OBJ_ELF
164 # define now_instr_sequence seg_info \
165 (now_seg)->tc_segment_info_data.insn_sequence
166 #else
167 static struct aarch64_instr_sequence now_instr_sequence;
168 #endif
169
170 /* Diagnostics inline function utilities.
171
172 These are lightweight utilities which should only be called by parse_operands
173 and other parsers. GAS processes each assembly line by parsing it against
173 instruction template(s); in the case of multiple templates (for the same
175 mnemonic name), those templates are tried one by one until one succeeds or
176 all fail. An assembly line may fail a few templates before being
177 successfully parsed; an error saved here in most cases is not a user error
178 but an error indicating the current template is not the right template.
179 Therefore it is very important that errors can be saved at a low cost during
180 the parsing; we don't want to slow down the whole parsing by recording
181 non-user errors in detail.
182
183 Remember that the objective is to help GAS pick up the most appropriate
184 error message in the case of multiple templates, e.g. FMOV which has 8
185 templates. */
186
187 static inline void
188 clear_error (void)
189 {
190 inst.parsing_error.kind = AARCH64_OPDE_NIL;
191 inst.parsing_error.error = NULL;
192 }
193
194 static inline bool
195 error_p (void)
196 {
197 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
198 }
199
200 static inline const char *
201 get_error_message (void)
202 {
203 return inst.parsing_error.error;
204 }
205
206 static inline enum aarch64_operand_error_kind
207 get_error_kind (void)
208 {
209 return inst.parsing_error.kind;
210 }
211
212 static inline void
213 set_error (enum aarch64_operand_error_kind kind, const char *error)
214 {
215 inst.parsing_error.kind = kind;
216 inst.parsing_error.error = error;
217 }
218
219 static inline void
220 set_recoverable_error (const char *error)
221 {
222 set_error (AARCH64_OPDE_RECOVERABLE, error);
223 }
224
225 /* Use the DESC field of the corresponding aarch64_operand entry to compose
226 the error message. */
227 static inline void
228 set_default_error (void)
229 {
230 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
231 }
232
233 static inline void
234 set_syntax_error (const char *error)
235 {
236 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
237 }
238
239 static inline void
240 set_first_syntax_error (const char *error)
241 {
242 if (! error_p ())
243 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
244 }
245
246 static inline void
247 set_fatal_syntax_error (const char *error)
248 {
249 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
250 }
251 \f
252 /* Return value for certain parsers when the parsing fails; those parsers
253 return the information of the parsed result, e.g. register number, on
254 success. */
255 #define PARSE_FAIL -1
256
257 /* This is an invalid condition code that means no conditional field is
258 present. */
259 #define COND_ALWAYS 0x10
260
261 typedef struct
262 {
263 const char *template;
264 uint32_t value;
265 } asm_nzcv;
266
267 struct reloc_entry
268 {
269 char *name;
270 bfd_reloc_code_real_type reloc;
271 };
272
273 /* Macros to define the register types and masks for the purpose
274 of parsing. */
275
276 #undef AARCH64_REG_TYPES
277 #define AARCH64_REG_TYPES \
278 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
279 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
280 BASIC_REG_TYPE(SP_32) /* wsp */ \
281 BASIC_REG_TYPE(SP_64) /* sp */ \
282 BASIC_REG_TYPE(Z_32) /* wzr */ \
283 BASIC_REG_TYPE(Z_64) /* xzr */ \
284 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
285 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
286 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
287 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
288 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
289 BASIC_REG_TYPE(VN) /* v[0-31] */ \
290 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
291 BASIC_REG_TYPE(PN) /* p[0-15] */ \
292 BASIC_REG_TYPE(ZA) /* za[0-15] */ \
293 BASIC_REG_TYPE(ZAH) /* za[0-15]h */ \
294 BASIC_REG_TYPE(ZAV) /* za[0-15]v */ \
295 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
296 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
297 /* Typecheck: same, plus SVE registers. */ \
298 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
299 | REG_TYPE(ZN)) \
300 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
301 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
302 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
303 /* Typecheck: same, plus SVE registers. */ \
304 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
306 | REG_TYPE(ZN)) \
307 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
308 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
309 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
310 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
311 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
313 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
314 /* Typecheck: any [BHSDQ]P FP. */ \
315 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
316 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
317 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
318 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
320 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
321 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
322 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
323 be used for SVE instructions, since Zn and Pn are valid symbols \
324 in other contexts. */ \
325 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
326 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
327 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
328 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
329 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
330 | REG_TYPE(ZN) | REG_TYPE(PN)) \
331 /* Any integer register; used for error messages only. */ \
332 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
333 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
334 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
335 /* Pseudo type to mark the end of the enumerator sequence. */ \
336 BASIC_REG_TYPE(MAX)
337
338 #undef BASIC_REG_TYPE
339 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
340 #undef MULTI_REG_TYPE
341 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
342
343 /* Register type enumerators. */
344 typedef enum aarch64_reg_type_
345 {
346 /* A list of REG_TYPE_*. */
347 AARCH64_REG_TYPES
348 } aarch64_reg_type;
349
350 #undef BASIC_REG_TYPE
351 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
352 #undef REG_TYPE
353 #define REG_TYPE(T) (1 << REG_TYPE_##T)
354 #undef MULTI_REG_TYPE
355 #define MULTI_REG_TYPE(T,V) V,
356
357 /* Structure for a hash table entry for a register. */
358 typedef struct
359 {
360 const char *name;
361 unsigned char number;
362 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
363 unsigned char builtin;
364 } reg_entry;
365
366 /* Values indexed by aarch64_reg_type to assist the type checking. */
367 static const unsigned reg_type_masks[] =
368 {
369 AARCH64_REG_TYPES
370 };
371
372 #undef BASIC_REG_TYPE
373 #undef REG_TYPE
374 #undef MULTI_REG_TYPE
375 #undef AARCH64_REG_TYPES
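/* To make the expansion above concrete (spelled out by hand, for
   illustration): the MULTI_REG_TYPE entry R_SP produces the enumerator
   REG_TYPE_R_SP together with a mask entry equivalent to

     reg_type_masks[REG_TYPE_R_SP] == ((1 << REG_TYPE_R_32)
                                       | (1 << REG_TYPE_R_64)
                                       | (1 << REG_TYPE_SP_32)
                                       | (1 << REG_TYPE_SP_64))

   while a BASIC_REG_TYPE entry such as R_64 gets the single bit
   (1 << REG_TYPE_R_64).  */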
376
377 /* Diagnostics used when we don't get a register of the expected type.
378 Note: this has to be synchronized with aarch64_reg_type definitions
379 above. */
380 static const char *
381 get_reg_expected_msg (aarch64_reg_type reg_type)
382 {
383 const char *msg;
384
385 switch (reg_type)
386 {
387 case REG_TYPE_R_32:
388 msg = N_("integer 32-bit register expected");
389 break;
390 case REG_TYPE_R_64:
391 msg = N_("integer 64-bit register expected");
392 break;
393 case REG_TYPE_R_N:
394 msg = N_("integer register expected");
395 break;
396 case REG_TYPE_R64_SP:
397 msg = N_("64-bit integer or SP register expected");
398 break;
399 case REG_TYPE_SVE_BASE:
400 msg = N_("base register expected");
401 break;
402 case REG_TYPE_R_Z:
403 msg = N_("integer or zero register expected");
404 break;
405 case REG_TYPE_SVE_OFFSET:
406 msg = N_("offset register expected");
407 break;
408 case REG_TYPE_R_SP:
409 msg = N_("integer or SP register expected");
410 break;
411 case REG_TYPE_R_Z_SP:
412 msg = N_("integer, zero or SP register expected");
413 break;
414 case REG_TYPE_FP_B:
415 msg = N_("8-bit SIMD scalar register expected");
416 break;
417 case REG_TYPE_FP_H:
418 msg = N_("16-bit SIMD scalar or floating-point half precision "
419 "register expected");
420 break;
421 case REG_TYPE_FP_S:
422 msg = N_("32-bit SIMD scalar or floating-point single precision "
423 "register expected");
424 break;
425 case REG_TYPE_FP_D:
426 msg = N_("64-bit SIMD scalar or floating-point double precision "
427 "register expected");
428 break;
429 case REG_TYPE_FP_Q:
430 msg = N_("128-bit SIMD scalar or floating-point quad precision "
431 "register expected");
432 break;
433 case REG_TYPE_R_Z_BHSDQ_V:
434 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
435 msg = N_("register expected");
436 break;
437 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
438 msg = N_("SIMD scalar or floating-point register expected");
439 break;
440 case REG_TYPE_VN: /* any V reg */
441 msg = N_("vector register expected");
442 break;
443 case REG_TYPE_ZN:
444 msg = N_("SVE vector register expected");
445 break;
446 case REG_TYPE_PN:
447 msg = N_("SVE predicate register expected");
448 break;
449 default:
450 as_fatal (_("invalid register type %d"), reg_type);
451 }
452 return msg;
453 }
454
455 /* Some well known registers that we refer to directly elsewhere. */
456 #define REG_SP 31
457 #define REG_ZR 31
458
459 /* Instructions take 4 bytes in the object file. */
460 #define INSN_SIZE 4
461
462 static htab_t aarch64_ops_hsh;
463 static htab_t aarch64_cond_hsh;
464 static htab_t aarch64_shift_hsh;
465 static htab_t aarch64_sys_regs_hsh;
466 static htab_t aarch64_pstatefield_hsh;
467 static htab_t aarch64_sys_regs_ic_hsh;
468 static htab_t aarch64_sys_regs_dc_hsh;
469 static htab_t aarch64_sys_regs_at_hsh;
470 static htab_t aarch64_sys_regs_tlbi_hsh;
471 static htab_t aarch64_sys_regs_sr_hsh;
472 static htab_t aarch64_reg_hsh;
473 static htab_t aarch64_barrier_opt_hsh;
474 static htab_t aarch64_nzcv_hsh;
475 static htab_t aarch64_pldop_hsh;
476 static htab_t aarch64_hint_opt_hsh;
477
478 /* Stuff needed to resolve the label ambiguity
479 As:
480 ...
481 label: <insn>
482 may differ from:
483 ...
484 label:
485 <insn> */
486
487 static symbolS *last_label_seen;
488
489 /* Literal pool structure. Held on a per-section
490 and per-sub-section basis. */
491
492 #define MAX_LITERAL_POOL_SIZE 1024
493 typedef struct literal_expression
494 {
495 expressionS exp;
496 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
497 LITTLENUM_TYPE * bignum;
498 } literal_expression;
499
500 typedef struct literal_pool
501 {
502 literal_expression literals[MAX_LITERAL_POOL_SIZE];
503 unsigned int next_free_entry;
504 unsigned int id;
505 symbolS *symbol;
506 segT section;
507 subsegT sub_section;
508 int size;
509 struct literal_pool *next;
510 } literal_pool;
511
512 /* Pointer to a linked list of literal pools. */
513 static literal_pool *list_of_pools = NULL;
514 \f
515 /* Pure syntax. */
516
517 /* This array holds the chars that always start a comment. If the
518 pre-processor is disabled, these aren't very useful. */
519 const char comment_chars[] = "";
520
521 /* This array holds the chars that only start a comment at the beginning of
522 a line. If the line seems to have the form '# 123 filename'
523 .line and .file directives will appear in the pre-processed output. */
524 /* Note that input_file.c hand checks for '#' at the beginning of the
525 first line of the input file. This is because the compiler outputs
526 #NO_APP at the beginning of its output. */
527 /* Also note that comments like this one will always work. */
528 const char line_comment_chars[] = "#";
529
530 const char line_separator_chars[] = ";";
531
532 /* Chars that can be used to separate the mantissa
533 from the exponent in floating point numbers. */
534 const char EXP_CHARS[] = "eE";
535
536 /* Chars that mean this number is a floating point constant. */
537 /* As in 0f12.456 */
538 /* or 0d1.2345e12 */
539
540 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
541
542 /* Prefix character that indicates the start of an immediate value. */
543 #define is_immediate_prefix(C) ((C) == '#')
544
545 /* Separator character handling. */
546
547 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
548
549 static inline bool
550 skip_past_char (char **str, char c)
551 {
552 if (**str == c)
553 {
554 (*str)++;
555 return true;
556 }
557 else
558 return false;
559 }
560
561 #define skip_past_comma(str) skip_past_char (str, ',')
562
563 /* Arithmetic expressions (possibly involving symbols). */
564
565 static bool in_aarch64_get_expression = false;
566
567 /* Third argument to aarch64_get_expression. */
568 #define GE_NO_PREFIX false
569 #define GE_OPT_PREFIX true
570
571 /* Fourth argument to aarch64_get_expression. */
572 #define ALLOW_ABSENT false
573 #define REJECT_ABSENT true
574
575 /* Fifth argument to aarch64_get_expression. */
576 #define NORMAL_RESOLUTION false
577
578 /* Return TRUE if the string pointed to by *STR is successfully parsed
579 as a valid expression; *EP will be filled with the information of
580 such an expression. Otherwise return FALSE.
581
582 If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
583 If REJECT_ABSENT is true then treat missing expressions as an error.
584 If DEFER_RESOLUTION is true, then do not resolve expressions against
585 constant symbols. Necessary if the expression is part of a fixup
586 that uses a reloc that must be emitted. */
587
588 static bool
589 aarch64_get_expression (expressionS * ep,
590 char ** str,
591 bool allow_immediate_prefix,
592 bool reject_absent,
593 bool defer_resolution)
594 {
595 char *save_in;
596 segT seg;
597 bool prefix_present = false;
598
599 if (allow_immediate_prefix)
600 {
601 if (is_immediate_prefix (**str))
602 {
603 (*str)++;
604 prefix_present = true;
605 }
606 }
607
608 memset (ep, 0, sizeof (expressionS));
609
610 save_in = input_line_pointer;
611 input_line_pointer = *str;
612 in_aarch64_get_expression = true;
613 if (defer_resolution)
614 seg = deferred_expression (ep);
615 else
616 seg = expression (ep);
617 in_aarch64_get_expression = false;
618
619 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
620 {
621 /* We found a bad expression in md_operand(). */
622 *str = input_line_pointer;
623 input_line_pointer = save_in;
624 if (prefix_present && ! error_p ())
625 set_fatal_syntax_error (_("bad expression"));
626 else
627 set_first_syntax_error (_("bad expression"));
628 return false;
629 }
630
631 #ifdef OBJ_AOUT
632 if (seg != absolute_section
633 && seg != text_section
634 && seg != data_section
635 && seg != bss_section
636 && seg != undefined_section)
637 {
638 set_syntax_error (_("bad segment"));
639 *str = input_line_pointer;
640 input_line_pointer = save_in;
641 return false;
642 }
643 #else
644 (void) seg;
645 #endif
646
647 *str = input_line_pointer;
648 input_line_pointer = save_in;
649 return true;
650 }
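/* A minimal usage sketch (the caller, STR and IMM are hypothetical):

     expressionS exp;
     char *p = str;
     if (aarch64_get_expression (&exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
                                 NORMAL_RESOLUTION)
         && exp.X_op == O_constant)
       imm = exp.X_add_number;

   GE_OPT_PREFIX accepts an optional leading '#' and REJECT_ABSENT turns
   a missing expression into a parse error.  */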
651
652 /* Turn a string in input_line_pointer into a floating point constant
653 of type TYPE, and store the appropriate bytes in *LITP. The number
654 of LITTLENUMS emitted is stored in *SIZEP. An error message is
655 returned, or NULL on OK. */
656
657 const char *
658 md_atof (int type, char *litP, int *sizeP)
659 {
660 return ieee_md_atof (type, litP, sizeP, target_big_endian);
661 }
662
663 /* We handle all bad expressions here, so that we can report the faulty
664 instruction in the error message. */
665 void
666 md_operand (expressionS * exp)
667 {
668 if (in_aarch64_get_expression)
669 exp->X_op = O_illegal;
670 }
671
672 /* Immediate values. */
673
674 /* Errors may be set multiple times during parsing or bit encoding
675 (particularly in the Neon bits), but usually the earliest error which is set
676 will be the most meaningful. Avoid overwriting it with later (cascading)
677 errors by calling this function. */
678
679 static void
680 first_error (const char *error)
681 {
682 if (! error_p ())
683 set_syntax_error (error);
684 }
685
686 /* Similar to first_error, but this function accepts formatted error
687 message. */
688 static void
689 first_error_fmt (const char *format, ...)
690 {
691 va_list args;
692 enum
693 { size = 100 };
694 /* N.B. this single buffer will not cause error messages for different
695 instructions to pollute each other; this is because at the end of
696 processing of each assembly line, the error message, if any, will be
697 collected by as_bad. */
698 static char buffer[size];
699
700 if (! error_p ())
701 {
702 int ret ATTRIBUTE_UNUSED;
703 va_start (args, format);
704 ret = vsnprintf (buffer, size, format, args);
705 know (ret <= size - 1 && ret >= 0);
706 va_end (args);
707 set_syntax_error (buffer);
708 }
709 }
710
711 /* Register parsing. */
712
713 /* Generic register parser which is called by other specialized
714 register parsers.
715 CCP points to what should be the beginning of a register name.
716 If it is indeed a valid register name, advance CCP over it and
717 return the reg_entry structure; otherwise return NULL.
718 It does not issue diagnostics. */
719
720 static reg_entry *
721 parse_reg (char **ccp)
722 {
723 char *start = *ccp;
724 char *p;
725 reg_entry *reg;
726
727 #ifdef REGISTER_PREFIX
728 if (*start != REGISTER_PREFIX)
729 return NULL;
730 start++;
731 #endif
732
733 p = start;
734 if (!ISALPHA (*p) || !is_name_beginner (*p))
735 return NULL;
736
737 do
738 p++;
739 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
740
741 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
742
743 if (!reg)
744 return NULL;
745
746 *ccp = p;
747 return reg;
748 }
749
750 /* Return TRUE if REG->TYPE is compatible with the required type TYPE;
751 otherwise return FALSE. */
752 static bool
753 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
754 {
755 return (reg_type_masks[type] & (1 << reg->type)) != 0;
756 }
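/* For example, given the masks defined above,
   aarch64_check_reg_type (reg, REG_TYPE_R_SP) accepts entries of type
   REG_TYPE_R_32, REG_TYPE_R_64, REG_TYPE_SP_32 or REG_TYPE_SP_64 and
   rejects everything else (illustrative only).  */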
757
758 /* Try to parse a base or offset register. Allow SVE base and offset
759 registers if REG_TYPE includes SVE registers. Return the register
760 entry on success, setting *QUALIFIER to the register qualifier.
761 Return null otherwise.
762
763 Note that this function does not issue any diagnostics. */
764
765 static const reg_entry *
766 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
767 aarch64_opnd_qualifier_t *qualifier)
768 {
769 char *str = *ccp;
770 const reg_entry *reg = parse_reg (&str);
771
772 if (reg == NULL)
773 return NULL;
774
775 switch (reg->type)
776 {
777 case REG_TYPE_R_32:
778 case REG_TYPE_SP_32:
779 case REG_TYPE_Z_32:
780 *qualifier = AARCH64_OPND_QLF_W;
781 break;
782
783 case REG_TYPE_R_64:
784 case REG_TYPE_SP_64:
785 case REG_TYPE_Z_64:
786 *qualifier = AARCH64_OPND_QLF_X;
787 break;
788
789 case REG_TYPE_ZN:
790 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
791 || str[0] != '.')
792 return NULL;
793 switch (TOLOWER (str[1]))
794 {
795 case 's':
796 *qualifier = AARCH64_OPND_QLF_S_S;
797 break;
798 case 'd':
799 *qualifier = AARCH64_OPND_QLF_S_D;
800 break;
801 default:
802 return NULL;
803 }
804 str += 2;
805 break;
806
807 default:
808 return NULL;
809 }
810
811 *ccp = str;
812
813 return reg;
814 }
815
816 /* Try to parse a base or offset register. Return the register entry
817 on success, setting *QUALIFIER to the register qualifier. Return null
818 otherwise.
819
820 Note that this function does not issue any diagnostics. */
821
822 static const reg_entry *
823 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
824 {
825 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
826 }
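/* Illustrative behaviour: for the input text "x3, ...", the function
   above returns the entry for x3, sets *QUALIFIER to AARCH64_OPND_QLF_X
   and leaves *CCP pointing at the ','.  This is a sketch of the expected
   result, following aarch64_addr_reg_parse.  */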
827
828 /* Parse the qualifier of a vector register or vector element of type
829 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
830 succeeds; otherwise return FALSE.
831
832 Accept only one occurrence of:
833 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
834 b h s d q */
835 static bool
836 parse_vector_type_for_operand (aarch64_reg_type reg_type,
837 struct vector_type_el *parsed_type, char **str)
838 {
839 char *ptr = *str;
840 unsigned width;
841 unsigned element_size;
842 enum vector_el_type type;
843
844 /* skip '.' */
845 gas_assert (*ptr == '.');
846 ptr++;
847
848 if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
849 {
850 width = 0;
851 goto elt_size;
852 }
853 width = strtoul (ptr, &ptr, 10);
854 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
855 {
856 first_error_fmt (_("bad size %d in vector width specifier"), width);
857 return false;
858 }
859
860 elt_size:
861 switch (TOLOWER (*ptr))
862 {
863 case 'b':
864 type = NT_b;
865 element_size = 8;
866 break;
867 case 'h':
868 type = NT_h;
869 element_size = 16;
870 break;
871 case 's':
872 type = NT_s;
873 element_size = 32;
874 break;
875 case 'd':
876 type = NT_d;
877 element_size = 64;
878 break;
879 case 'q':
880 if (reg_type == REG_TYPE_ZN || width == 1)
881 {
882 type = NT_q;
883 element_size = 128;
884 break;
885 }
886 /* fall through. */
887 default:
888 if (*ptr != '\0')
889 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
890 else
891 first_error (_("missing element size"));
892 return false;
893 }
894 if (width != 0 && width * element_size != 64
895 && width * element_size != 128
896 && !(width == 2 && element_size == 16)
897 && !(width == 4 && element_size == 8))
898 {
899 first_error_fmt (_
900 ("invalid element size %d and vector size combination %c"),
901 width, *ptr);
902 return false;
903 }
904 ptr++;
905
906 parsed_type->type = type;
907 parsed_type->width = width;
908
909 *str = ptr;
910
911 return true;
912 }
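/* For illustration: the suffix ".4s" gives parsed_type->type == NT_s and
   parsed_type->width == 4 (four 32-bit elements, a 128-bit vector),
   whereas ".s" on an SVE Z or P register gives width 0, the element
   count being implied by the vector length.  A sketch of the expected
   results, not executed code.  */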
913
914 /* *STR contains an SVE zero/merge predication suffix. Parse it into
915 *PARSED_TYPE and point *STR at the end of the suffix. */
916
917 static bool
918 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
919 {
920 char *ptr = *str;
921
922 /* Skip '/'. */
923 gas_assert (*ptr == '/');
924 ptr++;
925 switch (TOLOWER (*ptr))
926 {
927 case 'z':
928 parsed_type->type = NT_zero;
929 break;
930 case 'm':
931 parsed_type->type = NT_merge;
932 break;
933 default:
934 if (*ptr != '\0' && *ptr != ',')
935 first_error_fmt (_("unexpected character `%c' in predication type"),
936 *ptr);
937 else
938 first_error (_("missing predication type"));
939 return false;
940 }
941 parsed_type->width = 0;
942 *str = ptr + 1;
943 return true;
944 }
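/* E.g. (illustrative) the suffix "/m" yields NT_merge and "/z" yields
   NT_zero, as used in predicated SVE operands such as "p0/m".  */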
945
946 /* Parse a register of the type TYPE.
947
948 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
949 name or the parsed register is not of TYPE.
950
951 Otherwise return the register number, and optionally fill in the actual
952 type of the register in *RTYPE when multiple alternatives were given, and
953 return the register shape and element index information in *TYPEINFO.
954
955 IN_REG_LIST should be set with TRUE if the caller is parsing a register
956 list. */
957
958 static int
959 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
960 struct vector_type_el *typeinfo, bool in_reg_list)
961 {
962 char *str = *ccp;
963 const reg_entry *reg = parse_reg (&str);
964 struct vector_type_el atype;
965 struct vector_type_el parsetype;
966 bool is_typed_vecreg = false;
967
968 atype.defined = 0;
969 atype.type = NT_invtype;
970 atype.width = -1;
971 atype.index = 0;
972
973 if (reg == NULL)
974 {
975 if (typeinfo)
976 *typeinfo = atype;
977 set_default_error ();
978 return PARSE_FAIL;
979 }
980
981 if (! aarch64_check_reg_type (reg, type))
982 {
983 DEBUG_TRACE ("reg type check failed");
984 set_default_error ();
985 return PARSE_FAIL;
986 }
987 type = reg->type;
988
989 if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
990 && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
991 {
992 if (*str == '.')
993 {
994 if (!parse_vector_type_for_operand (type, &parsetype, &str))
995 return PARSE_FAIL;
996 }
997 else
998 {
999 if (!parse_predication_for_operand (&parsetype, &str))
1000 return PARSE_FAIL;
1001 }
1002
1003 /* Register is of the form Vn.[bhsdq]. */
1004 is_typed_vecreg = true;
1005
1006 if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
1007 {
1008 /* The width is always variable; we don't allow an integer width
1009 to be specified. */
1010 gas_assert (parsetype.width == 0);
1011 atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
1012 }
1013 else if (parsetype.width == 0)
1014 /* Expect index. In the new scheme we cannot have
1015 Vn.[bhsdq] represent a scalar. Therefore any
1016 Vn.[bhsdq] should have an index following it.
1017 Except in reglists of course. */
1018 atype.defined |= NTA_HASINDEX;
1019 else
1020 atype.defined |= NTA_HASTYPE;
1021
1022 atype.type = parsetype.type;
1023 atype.width = parsetype.width;
1024 }
1025
1026 if (skip_past_char (&str, '['))
1027 {
1028 expressionS exp;
1029
1030 /* Reject Sn[index] syntax. */
1031 if (!is_typed_vecreg)
1032 {
1033 first_error (_("this type of register can't be indexed"));
1034 return PARSE_FAIL;
1035 }
1036
1037 if (in_reg_list)
1038 {
1039 first_error (_("index not allowed inside register list"));
1040 return PARSE_FAIL;
1041 }
1042
1043 atype.defined |= NTA_HASINDEX;
1044
1045 aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
1046 NORMAL_RESOLUTION);
1047
1048 if (exp.X_op != O_constant)
1049 {
1050 first_error (_("constant expression required"));
1051 return PARSE_FAIL;
1052 }
1053
1054 if (! skip_past_char (&str, ']'))
1055 return PARSE_FAIL;
1056
1057 atype.index = exp.X_add_number;
1058 }
1059 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
1060 {
1061 /* Indexed vector register expected. */
1062 first_error (_("indexed vector register expected"));
1063 return PARSE_FAIL;
1064 }
1065
1066 /* A vector reg Vn should be typed or indexed. */
1067 if (type == REG_TYPE_VN && atype.defined == 0)
1068 {
1069 first_error (_("invalid use of vector register"));
1070 }
1071
1072 if (typeinfo)
1073 *typeinfo = atype;
1074
1075 if (rtype)
1076 *rtype = type;
1077
1078 *ccp = str;
1079
1080 return reg->number;
1081 }
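/* A worked example (sketch): for the operand text "v1.2d[1]",
   parse_typed_reg returns 1 and fills *TYPEINFO with type NT_d, width 2,
   index 1 and defined == (NTA_HASTYPE | NTA_HASINDEX), leaving *CCP just
   past the closing ']'.  */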
1082
1083 /* Parse register.
1084
1085 Return the register number on success; return PARSE_FAIL otherwise.
1086
1087 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1088 the register (e.g. NEON double or quad reg when either has been requested).
1089
1090 If this is a NEON vector register with additional type information, fill
1091 in the struct pointed to by VECTYPE (if non-NULL).
1092
1093 This parser does not handle register lists. */
1094
1095 static int
1096 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1097 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1098 {
1099 struct vector_type_el atype;
1100 char *str = *ccp;
1101 int reg = parse_typed_reg (&str, type, rtype, &atype,
1102 /*in_reg_list= */ false);
1103
1104 if (reg == PARSE_FAIL)
1105 return PARSE_FAIL;
1106
1107 if (vectype)
1108 *vectype = atype;
1109
1110 *ccp = str;
1111
1112 return reg;
1113 }
1114
1115 static inline bool
1116 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1117 {
1118 return
1119 e1.type == e2.type
1120 && e1.defined == e2.defined
1121 && e1.width == e2.width && e1.index == e2.index;
1122 }
1123
1124 /* This function parses a list of vector registers of type TYPE.
1125 On success, it returns the parsed register list information in the
1126 following encoded format:
1127
1128 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1129 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1130
1131 The information of the register shape and/or index is returned in
1132 *VECTYPE.
1133
1134 It returns PARSE_FAIL if the register list is invalid.
1135
1136 The list contains one to four registers.
1137 Each register can be one of:
1138 <Vt>.<T>[<index>]
1139 <Vt>.<T>
1140 All <T> should be identical.
1141 All <index> should be identical.
1142 There are restrictions on <Vt> numbers which are checked later
1143 (by reg_list_valid_p). */
1144
1145 static int
1146 parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1147 struct vector_type_el *vectype)
1148 {
1149 char *str = *ccp;
1150 int nb_regs;
1151 struct vector_type_el typeinfo, typeinfo_first;
1152 int val, val_range;
1153 int in_range;
1154 int ret_val;
1155 int i;
1156 bool error = false;
1157 bool expect_index = false;
1158
1159 if (*str != '{')
1160 {
1161 set_syntax_error (_("expecting {"));
1162 return PARSE_FAIL;
1163 }
1164 str++;
1165
1166 nb_regs = 0;
1167 typeinfo_first.defined = 0;
1168 typeinfo_first.type = NT_invtype;
1169 typeinfo_first.width = -1;
1170 typeinfo_first.index = 0;
1171 ret_val = 0;
1172 val = -1;
1173 val_range = -1;
1174 in_range = 0;
1175 do
1176 {
1177 if (in_range)
1178 {
1179 str++; /* skip over '-' */
1180 val_range = val;
1181 }
1182 val = parse_typed_reg (&str, type, NULL, &typeinfo,
1183 /*in_reg_list= */ true);
1184 if (val == PARSE_FAIL)
1185 {
1186 set_first_syntax_error (_("invalid vector register in list"));
1187 error = true;
1188 continue;
1189 }
1190 /* reject [bhsd]n */
1191 if (type == REG_TYPE_VN && typeinfo.defined == 0)
1192 {
1193 set_first_syntax_error (_("invalid scalar register in list"));
1194 error = true;
1195 continue;
1196 }
1197
1198 if (typeinfo.defined & NTA_HASINDEX)
1199 expect_index = true;
1200
1201 if (in_range)
1202 {
1203 if (val < val_range)
1204 {
1205 set_first_syntax_error
1206 (_("invalid range in vector register list"));
1207 error = true;
1208 }
1209 val_range++;
1210 }
1211 else
1212 {
1213 val_range = val;
1214 if (nb_regs == 0)
1215 typeinfo_first = typeinfo;
1216 else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1217 {
1218 set_first_syntax_error
1219 (_("type mismatch in vector register list"));
1220 error = true;
1221 }
1222 }
1223 if (! error)
1224 for (i = val_range; i <= val; i++)
1225 {
1226 ret_val |= i << (5 * nb_regs);
1227 nb_regs++;
1228 }
1229 in_range = 0;
1230 }
1231 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1232
1233 skip_whitespace (str);
1234 if (*str != '}')
1235 {
1236 set_first_syntax_error (_("end of vector register list not found"));
1237 error = true;
1238 }
1239 str++;
1240
1241 skip_whitespace (str);
1242
1243 if (expect_index)
1244 {
1245 if (skip_past_char (&str, '['))
1246 {
1247 expressionS exp;
1248
1249 aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
1250 NORMAL_RESOLUTION);
1251 if (exp.X_op != O_constant)
1252 {
1253 set_first_syntax_error (_("constant expression required."));
1254 error = true;
1255 }
1256 if (! skip_past_char (&str, ']'))
1257 error = true;
1258 else
1259 typeinfo_first.index = exp.X_add_number;
1260 }
1261 else
1262 {
1263 set_first_syntax_error (_("expected index"));
1264 error = true;
1265 }
1266 }
1267
1268 if (nb_regs > 4)
1269 {
1270 set_first_syntax_error (_("too many registers in vector register list"));
1271 error = true;
1272 }
1273 else if (nb_regs == 0)
1274 {
1275 set_first_syntax_error (_("empty vector register list"));
1276 error = true;
1277 }
1278
1279 *ccp = str;
1280 if (! error)
1281 *vectype = typeinfo_first;
1282
1283 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1284 }
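/* Worked example of the encoding described above (illustrative): for the
   list "{v2.4s, v3.4s}" the loop accumulates ret_val == 0x62 (regno 2 in
   bits 0-4, regno 3 in bits 5-9) and nb_regs == 2, so the function
   returns (0x62 << 2) | (2 - 1) == 0x189, i.e. the register numbers end
   up in bits 2-6 and 7-11 with num_of_reg - 1 in bits 0-1; *VECTYPE
   describes the ".4s" shape.  */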
1285
1286 /* Directives: register aliases. */
1287
1288 static reg_entry *
1289 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1290 {
1291 reg_entry *new;
1292 const char *name;
1293
1294 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1295 {
1296 if (new->builtin)
1297 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1298 str);
1299
1300 /* Only warn about a redefinition if it's not defined as the
1301 same register. */
1302 else if (new->number != number || new->type != type)
1303 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1304
1305 return NULL;
1306 }
1307
1308 name = xstrdup (str);
1309 new = XNEW (reg_entry);
1310
1311 new->name = name;
1312 new->number = number;
1313 new->type = type;
1314 new->builtin = false;
1315
1316 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1317
1318 return new;
1319 }
1320
1321 /* Look for the .req directive. This is of the form:
1322
1323 new_register_name .req existing_register_name
1324
1325 If we find one, or if it looks sufficiently like one that we want to
1326 handle any error here, return TRUE. Otherwise return FALSE. */
1327
1328 static bool
1329 create_register_alias (char *newname, char *p)
1330 {
1331 const reg_entry *old;
1332 char *oldname, *nbuf;
1333 size_t nlen;
1334
1335 /* The input scrubber ensures that whitespace after the mnemonic is
1336 collapsed to single spaces. */
1337 oldname = p;
1338 if (!startswith (oldname, " .req "))
1339 return false;
1340
1341 oldname += 6;
1342 if (*oldname == '\0')
1343 return false;
1344
1345 old = str_hash_find (aarch64_reg_hsh, oldname);
1346 if (!old)
1347 {
1348 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1349 return true;
1350 }
1351
1352 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1353 the desired alias name, and p points to its end. If not, then
1354 the desired alias name is in the global original_case_string. */
1355 #ifdef TC_CASE_SENSITIVE
1356 nlen = p - newname;
1357 #else
1358 newname = original_case_string;
1359 nlen = strlen (newname);
1360 #endif
1361
1362 nbuf = xmemdup0 (newname, nlen);
1363
1364 /* Create aliases under the new name as stated; an all-lowercase
1365 version of the new name; and an all-uppercase version of the new
1366 name. */
1367 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1368 {
1369 for (p = nbuf; *p; p++)
1370 *p = TOUPPER (*p);
1371
1372 if (strncmp (nbuf, newname, nlen))
1373 {
1374 /* If this attempt to create an additional alias fails, do not bother
1375 trying to create the all-lower case alias. We will fail and issue
1376 a second, duplicate error message. This situation arises when the
1377 programmer does something like:
1378 foo .req r0
1379 Foo .req r1
1380 The second .req creates the "Foo" alias but then fails to create
1381 the artificial FOO alias because it has already been created by the
1382 first .req. */
1383 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1384 {
1385 free (nbuf);
1386 return true;
1387 }
1388 }
1389
1390 for (p = nbuf; *p; p++)
1391 *p = TOLOWER (*p);
1392
1393 if (strncmp (nbuf, newname, nlen))
1394 insert_reg_alias (nbuf, old->number, old->type);
1395 }
1396
1397 free (nbuf);
1398 return true;
1399 }
1400
1401 /* Should never be called, as .req goes between the alias and the
1402 register name, not at the beginning of the line. */
1403 static void
1404 s_req (int a ATTRIBUTE_UNUSED)
1405 {
1406 as_bad (_("invalid syntax for .req directive"));
1407 }
1408
1409 /* The .unreq directive deletes an alias which was previously defined
1410 by .req. For example:
1411
1412 my_alias .req r11
1413 .unreq my_alias */
1414
1415 static void
1416 s_unreq (int a ATTRIBUTE_UNUSED)
1417 {
1418 char *name;
1419 char saved_char;
1420
1421 name = input_line_pointer;
1422
1423 while (*input_line_pointer != 0
1424 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1425 ++input_line_pointer;
1426
1427 saved_char = *input_line_pointer;
1428 *input_line_pointer = 0;
1429
1430 if (!*name)
1431 as_bad (_("invalid syntax for .unreq directive"));
1432 else
1433 {
1434 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1435
1436 if (!reg)
1437 as_bad (_("unknown register alias '%s'"), name);
1438 else if (reg->builtin)
1439 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1440 name);
1441 else
1442 {
1443 char *p;
1444 char *nbuf;
1445
1446 str_hash_delete (aarch64_reg_hsh, name);
1447 free ((char *) reg->name);
1448 free (reg);
1449
1450 /* Also locate the all upper case and all lower case versions.
1451 Do not complain if we cannot find one or the other as it
1452 was probably deleted above. */
1453
1454 nbuf = strdup (name);
1455 for (p = nbuf; *p; p++)
1456 *p = TOUPPER (*p);
1457 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1458 if (reg)
1459 {
1460 str_hash_delete (aarch64_reg_hsh, nbuf);
1461 free ((char *) reg->name);
1462 free (reg);
1463 }
1464
1465 for (p = nbuf; *p; p++)
1466 *p = TOLOWER (*p);
1467 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1468 if (reg)
1469 {
1470 str_hash_delete (aarch64_reg_hsh, nbuf);
1471 free ((char *) reg->name);
1472 free (reg);
1473 }
1474
1475 free (nbuf);
1476 }
1477 }
1478
1479 *input_line_pointer = saved_char;
1480 demand_empty_rest_of_line ();
1481 }
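/* Source-level sketch of the two directives handled above ("foo" is just
   a placeholder alias name):

	foo	.req	x0
	.unreq	foo

   The first line creates the alias "foo" plus the all-upper-case and
   all-lower-case copies; .unreq then deletes all of them again.  */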
1482
1483 /* Directives: Instruction set selection. */
1484
1485 #ifdef OBJ_ELF
1486 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1487 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1488 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1489 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1490
1491 /* Create a new mapping symbol for the transition to STATE. */
1492
1493 static void
1494 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1495 {
1496 symbolS *symbolP;
1497 const char *symname;
1498 int type;
1499
1500 switch (state)
1501 {
1502 case MAP_DATA:
1503 symname = "$d";
1504 type = BSF_NO_FLAGS;
1505 break;
1506 case MAP_INSN:
1507 symname = "$x";
1508 type = BSF_NO_FLAGS;
1509 break;
1510 default:
1511 abort ();
1512 }
1513
1514 symbolP = symbol_new (symname, now_seg, frag, value);
1515 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1516
1517 /* Save the mapping symbols for future reference. Also check that
1518 we do not place two mapping symbols at the same offset within a
1519 frag. We'll handle overlap between frags in
1520 check_mapping_symbols.
1521
1522 If .fill or other data filling directive generates zero sized data,
1523 the mapping symbol for the following code will have the same value
1524 as the one generated for the data filling directive. In this case,
1525 we replace the old symbol with the new one at the same address. */
1526 if (value == 0)
1527 {
1528 if (frag->tc_frag_data.first_map != NULL)
1529 {
1530 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1531 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1532 &symbol_lastP);
1533 }
1534 frag->tc_frag_data.first_map = symbolP;
1535 }
1536 if (frag->tc_frag_data.last_map != NULL)
1537 {
1538 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1539 S_GET_VALUE (symbolP));
1540 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1541 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1542 &symbol_lastP);
1543 }
1544 frag->tc_frag_data.last_map = symbolP;
1545 }
1546
1547 /* We must sometimes convert a region marked as code to data during
1548 code alignment, if an odd number of bytes have to be padded. The
1549 code mapping symbol is pushed to an aligned address. */
1550
1551 static void
1552 insert_data_mapping_symbol (enum mstate state,
1553 valueT value, fragS * frag, offsetT bytes)
1554 {
1555 /* If there was already a mapping symbol, remove it. */
1556 if (frag->tc_frag_data.last_map != NULL
1557 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1558 frag->fr_address + value)
1559 {
1560 symbolS *symp = frag->tc_frag_data.last_map;
1561
1562 if (value == 0)
1563 {
1564 know (frag->tc_frag_data.first_map == symp);
1565 frag->tc_frag_data.first_map = NULL;
1566 }
1567 frag->tc_frag_data.last_map = NULL;
1568 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1569 }
1570
1571 make_mapping_symbol (MAP_DATA, value, frag);
1572 make_mapping_symbol (state, value + bytes, frag);
1573 }
1574
1575 static void mapping_state_2 (enum mstate state, int max_chars);
1576
1577 /* Set the mapping state to STATE. Only call this when about to
1578 emit some STATE bytes to the file. */
1579
1580 void
1581 mapping_state (enum mstate state)
1582 {
1583 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1584
1585 if (state == MAP_INSN)
1586 /* AArch64 instructions require 4-byte alignment. When emitting
1587 instructions into any section, record the appropriate section
1588 alignment. */
1589 record_alignment (now_seg, 2);
1590
1591 if (mapstate == state)
1592 /* The mapping symbol has already been emitted.
1593 There is nothing else to do. */
1594 return;
1595
1596 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1597 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1598 /* Emit MAP_DATA within an executable section in order. Otherwise, it will be
1599 evaluated later in the next else. */
1600 return;
1601 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1602 {
1603 /* Only add the symbol if the offset is > 0:
1604 if we're at the first frag, check its size > 0;
1605 if we're not at the first frag, then for sure
1606 the offset is > 0. */
1607 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1608 const int add_symbol = (frag_now != frag_first)
1609 || (frag_now_fix () > 0);
1610
1611 if (add_symbol)
1612 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1613 }
1614 #undef TRANSITION
1615
1616 mapping_state_2 (state, 0);
1617 }
1618
1619 /* Same as mapping_state, but MAX_CHARS bytes have already been
1620 allocated. Put the mapping symbol that far back. */
1621
1622 static void
1623 mapping_state_2 (enum mstate state, int max_chars)
1624 {
1625 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1626
1627 if (!SEG_NORMAL (now_seg))
1628 return;
1629
1630 if (mapstate == state)
1631 /* The mapping symbol has already been emitted.
1632 There is nothing else to do. */
1633 return;
1634
1635 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1636 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1637 }
1638 #else
1639 #define mapping_state(x) /* nothing */
1640 #define mapping_state_2(x, y) /* nothing */
1641 #endif
1642
1643 /* Directives: sectioning and alignment. */
1644
1645 static void
1646 s_bss (int ignore ATTRIBUTE_UNUSED)
1647 {
1648 /* We don't support putting frags in the BSS segment, we fake it by
1649 marking in_bss, then looking at s_skip for clues. */
1650 subseg_set (bss_section, 0);
1651 demand_empty_rest_of_line ();
1652 mapping_state (MAP_DATA);
1653 }
1654
1655 static void
1656 s_even (int ignore ATTRIBUTE_UNUSED)
1657 {
1658 /* Never make frag if expect extra pass. */
1659 if (!need_pass_2)
1660 frag_align (1, 0, 0);
1661
1662 record_alignment (now_seg, 1);
1663
1664 demand_empty_rest_of_line ();
1665 }
1666
1667 /* Directives: Literal pools. */
1668
1669 static literal_pool *
1670 find_literal_pool (int size)
1671 {
1672 literal_pool *pool;
1673
1674 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1675 {
1676 if (pool->section == now_seg
1677 && pool->sub_section == now_subseg && pool->size == size)
1678 break;
1679 }
1680
1681 return pool;
1682 }
1683
1684 static literal_pool *
1685 find_or_make_literal_pool (int size)
1686 {
1687 /* Next literal pool ID number. */
1688 static unsigned int latest_pool_num = 1;
1689 literal_pool *pool;
1690
1691 pool = find_literal_pool (size);
1692
1693 if (pool == NULL)
1694 {
1695 /* Create a new pool. */
1696 pool = XNEW (literal_pool);
1697 if (!pool)
1698 return NULL;
1699
1700 /* Currently we always put the literal pool in the current text
1701 section. If we were generating "small" model code where we
1702 knew that all code and initialised data was within 1MB then
1703 we could output literals to mergeable, read-only data
1704 sections. */
1705
1706 pool->next_free_entry = 0;
1707 pool->section = now_seg;
1708 pool->sub_section = now_subseg;
1709 pool->size = size;
1710 pool->next = list_of_pools;
1711 pool->symbol = NULL;
1712
1713 /* Add it to the list. */
1714 list_of_pools = pool;
1715 }
1716
1717 /* New pools, and emptied pools, will have a NULL symbol. */
1718 if (pool->symbol == NULL)
1719 {
1720 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1721 &zero_address_frag, 0);
1722 pool->id = latest_pool_num++;
1723 }
1724
1725 /* Done. */
1726 return pool;
1727 }
1728
1729 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1730 Return TRUE on success, otherwise return FALSE. */
1731 static bool
1732 add_to_lit_pool (expressionS *exp, int size)
1733 {
1734 literal_pool *pool;
1735 unsigned int entry;
1736
1737 pool = find_or_make_literal_pool (size);
1738
1739 /* Check if this literal value is already in the pool. */
1740 for (entry = 0; entry < pool->next_free_entry; entry++)
1741 {
1742 expressionS * litexp = & pool->literals[entry].exp;
1743
1744 if ((litexp->X_op == exp->X_op)
1745 && (exp->X_op == O_constant)
1746 && (litexp->X_add_number == exp->X_add_number)
1747 && (litexp->X_unsigned == exp->X_unsigned))
1748 break;
1749
1750 if ((litexp->X_op == exp->X_op)
1751 && (exp->X_op == O_symbol)
1752 && (litexp->X_add_number == exp->X_add_number)
1753 && (litexp->X_add_symbol == exp->X_add_symbol)
1754 && (litexp->X_op_symbol == exp->X_op_symbol))
1755 break;
1756 }
1757
1758 /* Do we need to create a new entry? */
1759 if (entry == pool->next_free_entry)
1760 {
1761 if (entry >= MAX_LITERAL_POOL_SIZE)
1762 {
1763 set_syntax_error (_("literal pool overflow"));
1764 return false;
1765 }
1766
1767 pool->literals[entry].exp = *exp;
1768 pool->next_free_entry += 1;
1769 if (exp->X_op == O_big)
1770 {
1771 /* PR 16688: Bignums are held in a single global array. We must
1772 copy and preserve that value now, before it is overwritten. */
1773 pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
1774 exp->X_add_number);
1775 memcpy (pool->literals[entry].bignum, generic_bignum,
1776 CHARS_PER_LITTLENUM * exp->X_add_number);
1777 }
1778 else
1779 pool->literals[entry].bignum = NULL;
1780 }
1781
1782 exp->X_op = O_symbol;
1783 exp->X_add_number = ((int) entry) * size;
1784 exp->X_add_symbol = pool->symbol;
1785
1786 return true;
1787 }
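/* Sketch of the net effect: on success *EXP has been rewritten as

     exp->X_op         = O_symbol;
     exp->X_add_symbol = pool->symbol;
     exp->X_add_number = entry * size;

   i.e. it now refers to the future location of the literal inside the
   pool that s_ltorg will emit, rather than to the literal value
   itself.  */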
1788
1789 /* Can't use symbol_new here, so have to create a symbol and then at
1790 a later date assign it a value. That's what these functions do. */
1791
1792 static void
1793 symbol_locate (symbolS * symbolP,
1794 const char *name,/* It is copied, the caller can modify. */
1795 segT segment, /* Segment identifier (SEG_<something>). */
1796 valueT valu, /* Symbol value. */
1797 fragS * frag) /* Associated fragment. */
1798 {
1799 size_t name_length;
1800 char *preserved_copy_of_name;
1801
1802 name_length = strlen (name) + 1; /* +1 for \0. */
1803 obstack_grow (&notes, name, name_length);
1804 preserved_copy_of_name = obstack_finish (&notes);
1805
1806 #ifdef tc_canonicalize_symbol_name
1807 preserved_copy_of_name =
1808 tc_canonicalize_symbol_name (preserved_copy_of_name);
1809 #endif
1810
1811 S_SET_NAME (symbolP, preserved_copy_of_name);
1812
1813 S_SET_SEGMENT (symbolP, segment);
1814 S_SET_VALUE (symbolP, valu);
1815 symbol_clear_list_pointers (symbolP);
1816
1817 symbol_set_frag (symbolP, frag);
1818
1819 /* Link to end of symbol chain. */
1820 {
1821 extern int symbol_table_frozen;
1822
1823 if (symbol_table_frozen)
1824 abort ();
1825 }
1826
1827 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1828
1829 obj_symbol_new_hook (symbolP);
1830
1831 #ifdef tc_symbol_new_hook
1832 tc_symbol_new_hook (symbolP);
1833 #endif
1834
1835 #ifdef DEBUG_SYMS
1836 verify_symbol_chain (symbol_rootP, symbol_lastP);
1837 #endif /* DEBUG_SYMS */
1838 }
1839
1840
1841 static void
1842 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1843 {
1844 unsigned int entry;
1845 literal_pool *pool;
1846 char sym_name[20];
1847 int align;
1848
1849 for (align = 2; align <= 4; align++)
1850 {
1851 int size = 1 << align;
1852
1853 pool = find_literal_pool (size);
1854 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1855 continue;
1856
1857 /* Align the pool as required by its word-sized accesses.
1858 Only make a frag if we have to. */
1859 if (!need_pass_2)
1860 frag_align (align, 0, 0);
1861
1862 mapping_state (MAP_DATA);
1863
1864 record_alignment (now_seg, align);
1865
1866 sprintf (sym_name, "$$lit_\002%x", pool->id);
1867
1868 symbol_locate (pool->symbol, sym_name, now_seg,
1869 (valueT) frag_now_fix (), frag_now);
1870 symbol_table_insert (pool->symbol);
1871
1872 for (entry = 0; entry < pool->next_free_entry; entry++)
1873 {
1874 expressionS * exp = & pool->literals[entry].exp;
1875
1876 if (exp->X_op == O_big)
1877 {
1878 /* PR 16688: Restore the global bignum value. */
1879 gas_assert (pool->literals[entry].bignum != NULL);
1880 memcpy (generic_bignum, pool->literals[entry].bignum,
1881 CHARS_PER_LITTLENUM * exp->X_add_number);
1882 }
1883
1884 /* First output the expression in the instruction to the pool. */
1885 emit_expr (exp, size); /* .word|.xword */
1886
1887 if (exp->X_op == O_big)
1888 {
1889 free (pool->literals[entry].bignum);
1890 pool->literals[entry].bignum = NULL;
1891 }
1892 }
1893
1894 /* Mark the pool as empty. */
1895 pool->next_free_entry = 0;
1896 pool->symbol = NULL;
1897 }
1898 }
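/* Typical source-level pairing (illustrative):

	ldr	x0, =0x1122334455667788
	...
	.ltorg

   The "ldr reg, =value" pseudo-instruction places the constant in the
   pool via add_to_lit_pool, and .ltorg (or .pool) dumps the collected
   literals, suitably aligned, at this point.  */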
1899
1900 #ifdef OBJ_ELF
1901 /* Forward declarations for functions below, in the MD interface
1902 section. */
1903 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1904 static struct reloc_table_entry * find_reloc_table_entry (char **);
1905
1906 /* Directives: Data. */
1907 /* N.B. the support for relocation suffix in this directive needs to be
1908 implemented properly. */
1909
1910 static void
1911 s_aarch64_elf_cons (int nbytes)
1912 {
1913 expressionS exp;
1914
1915 #ifdef md_flush_pending_output
1916 md_flush_pending_output ();
1917 #endif
1918
1919 if (is_it_end_of_statement ())
1920 {
1921 demand_empty_rest_of_line ();
1922 return;
1923 }
1924
1925 #ifdef md_cons_align
1926 md_cons_align (nbytes);
1927 #endif
1928
1929 mapping_state (MAP_DATA);
1930 do
1931 {
1932 struct reloc_table_entry *reloc;
1933
1934 expression (&exp);
1935
1936 if (exp.X_op != O_symbol)
1937 emit_expr (&exp, (unsigned int) nbytes);
1938 else
1939 {
1940 skip_past_char (&input_line_pointer, '#');
1941 if (skip_past_char (&input_line_pointer, ':'))
1942 {
1943 reloc = find_reloc_table_entry (&input_line_pointer);
1944 if (reloc == NULL)
1945 as_bad (_("unrecognized relocation suffix"));
1946 else
1947 as_bad (_("unimplemented relocation suffix"));
1948 ignore_rest_of_line ();
1949 return;
1950 }
1951 else
1952 emit_expr (&exp, (unsigned int) nbytes);
1953 }
1954 }
1955 while (*input_line_pointer++ == ',');
1956
1957 /* Put terminator back into stream. */
1958 input_line_pointer--;
1959 demand_empty_rest_of_line ();
1960 }
1961
1962 /* Mark symbol that it follows a variant PCS convention. */
1963
1964 static void
1965 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
1966 {
1967 char *name;
1968 char c;
1969 symbolS *sym;
1970 asymbol *bfdsym;
1971 elf_symbol_type *elfsym;
1972
1973 c = get_symbol_name (&name);
1974 if (!*name)
1975 as_bad (_("Missing symbol name in directive"));
1976 sym = symbol_find_or_make (name);
1977 restore_line_pointer (c);
1978 demand_empty_rest_of_line ();
1979 bfdsym = symbol_get_bfdsym (sym);
1980 elfsym = elf_symbol_from (bfdsym);
1981 gas_assert (elfsym);
1982 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
1983 }
1984 #endif /* OBJ_ELF */
1985
1986 /* Output a 32-bit word, but mark as an instruction. */
1987
1988 static void
1989 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1990 {
1991 expressionS exp;
1992
1993 #ifdef md_flush_pending_output
1994 md_flush_pending_output ();
1995 #endif
1996
1997 if (is_it_end_of_statement ())
1998 {
1999 demand_empty_rest_of_line ();
2000 return;
2001 }
2002
2003 /* Sections are assumed to start aligned. In an executable section, there is no
2004 MAP_DATA symbol pending. So we only align the address during
2005 MAP_DATA --> MAP_INSN transition.
2006 For other sections, this is not guaranteed. */
2007 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2008 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
2009 frag_align_code (2, 0);
2010
2011 #ifdef OBJ_ELF
2012 mapping_state (MAP_INSN);
2013 #endif
2014
2015 do
2016 {
2017 expression (&exp);
2018 if (exp.X_op != O_constant)
2019 {
2020 as_bad (_("constant expression required"));
2021 ignore_rest_of_line ();
2022 return;
2023 }
2024
2025 if (target_big_endian)
2026 {
2027 unsigned int val = exp.X_add_number;
2028 exp.X_add_number = SWAP_32 (val);
2029 }
2030 emit_expr (&exp, 4);
2031 }
2032 while (*input_line_pointer++ == ',');
2033
2034 /* Put terminator back into stream. */
2035 input_line_pointer--;
2036 demand_empty_rest_of_line ();
2037 }
2038
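/* Implement the .cfi_b_key_frame pseudo-op: record in the current CFI FDE
   that return addresses in this frame are signed with the PAC B key.  */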
2039 static void
2040 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2041 {
2042 demand_empty_rest_of_line ();
2043 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2044 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2045 }
2046
2047 #ifdef OBJ_ELF
2048 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2049
2050 static void
2051 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2052 {
2053 expressionS exp;
2054
2055 expression (&exp);
2056 frag_grow (4);
2057 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2058 BFD_RELOC_AARCH64_TLSDESC_ADD);
2059
2060 demand_empty_rest_of_line ();
2061 }
2062
2063 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2064
2065 static void
2066 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2067 {
2068 expressionS exp;
2069
2070 /* Since we're just labelling the code, there's no need to define a
2071 mapping symbol. */
2072 expression (&exp);
2073 /* Make sure there is enough room in this frag for the following
2074 blr. This trick only works if the blr follows immediately after
2075 the .tlsdesccall directive. */
2076 frag_grow (4);
2077 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2078 BFD_RELOC_AARCH64_TLSDESC_CALL);
2079
2080 demand_empty_rest_of_line ();
2081 }
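/* For illustration, a typical general-dynamic TLS descriptor sequence using
   these directives (an assumed example, not generated here) is:

       adrp  x0, :tlsdesc:var
       ldr   x1, [x0, #:tlsdesc_lo12:var]
       add   x0, x0, :tlsdesc_lo12:var
       .tlsdesccall var
       blr   x1

   The .tlsdesccall directive attaches BFD_RELOC_AARCH64_TLSDESC_CALL to the
   following blr so the linker can identify (and possibly relax) the whole
   sequence.  */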
2082
2083 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2084
2085 static void
2086 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2087 {
2088 expressionS exp;
2089
2090 expression (&exp);
2091 frag_grow (4);
2092 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2093 BFD_RELOC_AARCH64_TLSDESC_LDR);
2094
2095 demand_empty_rest_of_line ();
2096 }
2097 #endif /* OBJ_ELF */
2098
2099 static void s_aarch64_arch (int);
2100 static void s_aarch64_cpu (int);
2101 static void s_aarch64_arch_extension (int);
2102
2103 /* This table describes all the machine specific pseudo-ops the assembler
2104 has to support. The fields are:
2105 pseudo-op name without dot
2106 function to call to execute this pseudo-op
2107 Integer arg to pass to the function. */
2108
2109 const pseudo_typeS md_pseudo_table[] = {
2110 /* Never called because '.req' does not start a line. */
2111 {"req", s_req, 0},
2112 {"unreq", s_unreq, 0},
2113 {"bss", s_bss, 0},
2114 {"even", s_even, 0},
2115 {"ltorg", s_ltorg, 0},
2116 {"pool", s_ltorg, 0},
2117 {"cpu", s_aarch64_cpu, 0},
2118 {"arch", s_aarch64_arch, 0},
2119 {"arch_extension", s_aarch64_arch_extension, 0},
2120 {"inst", s_aarch64_inst, 0},
2121 {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
2122 #ifdef OBJ_ELF
2123 {"tlsdescadd", s_tlsdescadd, 0},
2124 {"tlsdesccall", s_tlsdesccall, 0},
2125 {"tlsdescldr", s_tlsdescldr, 0},
2126 {"word", s_aarch64_elf_cons, 4},
2127 {"long", s_aarch64_elf_cons, 4},
2128 {"xword", s_aarch64_elf_cons, 8},
2129 {"dword", s_aarch64_elf_cons, 8},
2130 {"variant_pcs", s_variant_pcs, 0},
2131 #endif
2132 {"float16", float_cons, 'h'},
2133 {"bfloat16", float_cons, 'b'},
2134 {0, 0, 0}
2135 };
2136 \f
2137
2138 /* Check whether STR points to a register name followed by a comma or the
2139 end of line; REG_TYPE indicates which register types are checked
2140 against. Return TRUE if STR is such a register name; otherwise return
2141 FALSE. The function is not intended to produce any diagnostics, but since
2142 the register parser aarch64_reg_parse, which is called by this function,
2143 does produce diagnostics, we call clear_error to clear any diagnostics
2144 that may be generated by aarch64_reg_parse.
2145 Also, the function returns FALSE directly if there is any user error
2146 present at the function entry. This prevents the existing diagnostics
2147 state from being spoiled.
2148 The function currently serves parse_constant_immediate and
2149 parse_big_immediate only. */
2150 static bool
2151 reg_name_p (char *str, aarch64_reg_type reg_type)
2152 {
2153 int reg;
2154
2155 /* Prevent the diagnostics state from being spoiled. */
2156 if (error_p ())
2157 return false;
2158
2159 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2160
2161 /* Clear the parsing error that may be set by the reg parser. */
2162 clear_error ();
2163
2164 if (reg == PARSE_FAIL)
2165 return false;
2166
2167 skip_whitespace (str);
2168 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2169 return true;
2170
2171 return false;
2172 }
2173
2174 /* Parser functions used exclusively in instruction operands. */
2175
2176 /* Parse an immediate expression which may not be constant.
2177
2178 To prevent the expression parser from pushing a register name
2179 into the symbol table as an undefined symbol, a check is first
2180 made to find out whether STR is a register of type REG_TYPE followed
2181 by a comma or the end of line. Return FALSE if STR is such a register name. */
2182
2183 static bool
2184 parse_immediate_expression (char **str, expressionS *exp,
2185 aarch64_reg_type reg_type)
2186 {
2187 if (reg_name_p (*str, reg_type))
2188 {
2189 set_recoverable_error (_("immediate operand required"));
2190 return false;
2191 }
2192
2193 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT,
2194 NORMAL_RESOLUTION);
2195
2196 if (exp->X_op == O_absent)
2197 {
2198 set_fatal_syntax_error (_("missing immediate expression"));
2199 return false;
2200 }
2201
2202 return true;
2203 }
2204
2205 /* Constant immediate-value read function for use in insn parsing.
2206 STR points to the beginning of the immediate (with the optional
2207 leading #); *VAL receives the value. REG_TYPE says which register
2208 names should be treated as registers rather than as symbolic immediates.
2209
2210 Return TRUE on success; otherwise return FALSE. */
2211
2212 static bool
2213 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2214 {
2215 expressionS exp;
2216
2217 if (! parse_immediate_expression (str, &exp, reg_type))
2218 return false;
2219
2220 if (exp.X_op != O_constant)
2221 {
2222 set_syntax_error (_("constant expression required"));
2223 return false;
2224 }
2225
2226 *val = exp.X_add_number;
2227 return true;
2228 }
2229
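/* Extract the AArch64 8-bit floating-point immediate encoding (imm8) from the
   IEEE single-precision bit pattern IMM: bit 31 (the sign) becomes bit 7 and
   bits 25:19 (the low exponent bits and top fraction bits) become bits 6:0.  */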
2230 static uint32_t
2231 encode_imm_float_bits (uint32_t imm)
2232 {
2233 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2234 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2235 }
2236
2237 /* Return TRUE if the single-precision floating-point value encoded in IMM
2238 can be expressed in the AArch64 8-bit signed floating-point format with
2239 3-bit exponent and normalized 4 bits of precision; in other words, the
2240 floating-point value must be expressible as
2241 (+/-) n / 16 * power (2, r)
2242 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
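/* Worked example (illustrative): 1.0 = 16/16 * 2^0 has the single-precision
   encoding 0x3f800000; its low 19 bits are zero and bits 29:25 are the
   inverse of bit 30, so aarch64_imm_float_p accepts it and
   encode_imm_float_bits above maps it to the 8-bit immediate 0x70.  */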
2243
2244 static bool
2245 aarch64_imm_float_p (uint32_t imm)
2246 {
2247 /* If a single-precision floating-point value has the following bit
2248 pattern, it can be expressed in the AArch64 8-bit floating-point
2249 format:
2250
2251 3 32222222 2221111111111
2252 1 09876543 21098765432109876543210
2253 n Eeeeeexx xxxx0000000000000000000
2254
2255 where n, e and each x are either 0 or 1 independently, with
2256 E == ~ e. */
2257
2258 uint32_t pattern;
2259
2260 /* Prepare the pattern for 'Eeeeee'. */
2261 if (((imm >> 30) & 0x1) == 0)
2262 pattern = 0x3e000000;
2263 else
2264 pattern = 0x40000000;
2265
2266 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2267 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2268 }
2269
2270 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2271 as an IEEE float without any loss of precision. Store the value in
2272 *FPWORD if so. */
2273
2274 static bool
2275 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2276 {
2277 /* If a double-precision floating-point value has the following bit
2278 pattern, it can be expressed in a float:
2279
2280 6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2281 3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2282 n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2283
2284 -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
2285 if Eeee_eeee != 1111_1111
2286
2287 where n, e, s and S are either 0 or 1 independently and where ~ is the
2288 inverse of E. */
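  /* Worked example (illustrative): 1.0 as an IEEE double is
     0x3ff0000000000000; its low 29 bits are zero and its exponent passes the
     E~~~ and Eeee_eeee checks below, so the function returns TRUE with
     *FPWORD set to the single-precision pattern 0x3f800000.  */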
2289
2290 uint32_t pattern;
2291 uint32_t high32 = imm >> 32;
2292 uint32_t low32 = imm;
2293
2294 /* Lower 29 bits need to be 0s. */
2295 if ((imm & 0x1fffffff) != 0)
2296 return false;
2297
2298 /* Prepare the pattern for 'Eeeeeeeee'. */
2299 if (((high32 >> 30) & 0x1) == 0)
2300 pattern = 0x38000000;
2301 else
2302 pattern = 0x40000000;
2303
2304 /* Check E~~~. */
2305 if ((high32 & 0x78000000) != pattern)
2306 return false;
2307
2308 /* Check Eeee_eeee != 1111_1111. */
2309 if ((high32 & 0x7ff00000) == 0x47f00000)
2310 return false;
2311
2312 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */
2313 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */
2314 | (low32 >> 29)); /* 3 S bits. */
2315 return true;
2316 }
2317
2318 /* Return true if we should treat OPERAND as a double-precision
2319 floating-point operand rather than a single-precision one. */
2320 static bool
2321 double_precision_operand_p (const aarch64_opnd_info *operand)
2322 {
2323 /* Check for unsuffixed SVE registers, which are allowed
2324 for LDR and STR but not in instructions that require an
2325 immediate. We get better error messages if we arbitrarily
2326 pick one size, parse the immediate normally, and then
2327 report the match failure in the normal way. */
2328 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2329 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2330 }
2331
2332 /* Parse a floating-point immediate. Return TRUE on success and return the
2333 value in *IMMED in the format of IEEE754 single-precision encoding.
2334 *CCP points to the start of the string; DP_P is TRUE when the immediate
2335 is expected to be in double-precision (N.B. this only matters when
2336 hexadecimal representation is involved). REG_TYPE says which register
2337 names should be treated as registers rather than as symbolic immediates.
2338
2339 This routine accepts any IEEE float; it is up to the callers to reject
2340 invalid ones. */
2341
2342 static bool
2343 parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
2344 aarch64_reg_type reg_type)
2345 {
2346 char *str = *ccp;
2347 char *fpnum;
2348 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2349 int64_t val = 0;
2350 unsigned fpword = 0;
2351 bool hex_p = false;
2352
2353 skip_past_char (&str, '#');
2354
2355 fpnum = str;
2356 skip_whitespace (fpnum);
2357
2358 if (startswith (fpnum, "0x"))
2359 {
2360 /* Support the hexadecimal representation of the IEEE754 encoding.
2361 Double-precision is expected when DP_P is TRUE, otherwise the
2362 representation should be in single-precision. */
2363 if (! parse_constant_immediate (&str, &val, reg_type))
2364 goto invalid_fp;
2365
2366 if (dp_p)
2367 {
2368 if (!can_convert_double_to_float (val, &fpword))
2369 goto invalid_fp;
2370 }
2371 else if ((uint64_t) val > 0xffffffff)
2372 goto invalid_fp;
2373 else
2374 fpword = val;
2375
2376 hex_p = true;
2377 }
2378 else if (reg_name_p (str, reg_type))
2379 {
2380 set_recoverable_error (_("immediate operand required"));
2381 return false;
2382 }
2383
2384 if (! hex_p)
2385 {
2386 int i;
2387
2388 if ((str = atof_ieee (str, 's', words)) == NULL)
2389 goto invalid_fp;
2390
2391 /* Our FP word must be 32 bits (single-precision FP). */
2392 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2393 {
2394 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2395 fpword |= words[i];
2396 }
2397 }
2398
2399 *immed = fpword;
2400 *ccp = str;
2401 return true;
2402
2403 invalid_fp:
2404 set_fatal_syntax_error (_("invalid floating-point constant"));
2405 return false;
2406 }
2407
2408 /* Less-generic immediate-value read function with the possibility of loading
2409 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2410 instructions.
2411
2412 To prevent the expression parser from pushing a register name into the
2413 symbol table as an undefined symbol, a check is firstly done to find
2414 out whether STR is a register of type REG_TYPE followed by a comma or
2415 the end of line. Return FALSE if STR is such a register. */
2416
2417 static bool
2418 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2419 {
2420 char *ptr = *str;
2421
2422 if (reg_name_p (ptr, reg_type))
2423 {
2424 set_syntax_error (_("immediate operand required"));
2425 return false;
2426 }
2427
2428 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT,
2429 NORMAL_RESOLUTION);
2430
2431 if (inst.reloc.exp.X_op == O_constant)
2432 *imm = inst.reloc.exp.X_add_number;
2433
2434 *str = ptr;
2435
2436 return true;
2437 }
2438
2439 /* Mark in *RELOC that OPERAND needs a GAS internal fixup.
2440 If NEED_LIBOPCODES_P is non-zero, the fixup will need
2441 assistance from libopcodes. */
2442
2443 static inline void
2444 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2445 const aarch64_opnd_info *operand,
2446 int need_libopcodes_p)
2447 {
2448 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2449 reloc->opnd = operand->type;
2450 if (need_libopcodes_p)
2451 reloc->need_libopcodes_p = 1;
2452 }
2453
2454 /* Return TRUE if the instruction needs to be fixed up later internally by
2455 GAS; otherwise return FALSE. */
2456
2457 static inline bool
2458 aarch64_gas_internal_fixup_p (void)
2459 {
2460 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2461 }
2462
2463 /* Assign the immediate value to the relevant field in *OPERAND if
2464 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2465 needs an internal fixup in a later stage.
2466 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2467 IMM.VALUE that may get assigned with the constant. */
2468 static inline void
2469 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2470 aarch64_opnd_info *operand,
2471 int addr_off_p,
2472 int need_libopcodes_p,
2473 int skip_p)
2474 {
2475 if (reloc->exp.X_op == O_constant)
2476 {
2477 if (addr_off_p)
2478 operand->addr.offset.imm = reloc->exp.X_add_number;
2479 else
2480 operand->imm.value = reloc->exp.X_add_number;
2481 reloc->type = BFD_RELOC_UNUSED;
2482 }
2483 else
2484 {
2485 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2486 /* Tell libopcodes to ignore this operand or not. This is helpful
2487 when one of the operands needs to be fixed up later but we need
2488 libopcodes to check the other operands. */
2489 operand->skip = skip_p;
2490 }
2491 }
2492
2493 /* Relocation modifiers. Each entry in the table contains the textual
2494 name for the relocation which may be placed before a symbol used as
2495 a load/store offset, or an ADD immediate. It must be surrounded by a
2496 leading and trailing colon, for example:
2497
2498 ldr x0, [x1, #:rello:varsym]
2499 add x0, x1, #:rello:varsym */
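/* For illustration, a common concrete use of these modifiers is ADRP-based
   page addressing (an assumed example):

       adrp x0, :pg_hi21:varsym
       add  x0, x0, #:lo12:varsym
       ldr  w1, [x0, #:lo12:varsym]  */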
2500
2501 struct reloc_table_entry
2502 {
2503 const char *name;
2504 int pc_rel;
2505 bfd_reloc_code_real_type adr_type;
2506 bfd_reloc_code_real_type adrp_type;
2507 bfd_reloc_code_real_type movw_type;
2508 bfd_reloc_code_real_type add_type;
2509 bfd_reloc_code_real_type ldst_type;
2510 bfd_reloc_code_real_type ld_literal_type;
2511 };
2512
2513 static struct reloc_table_entry reloc_table[] =
2514 {
2515 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2516 {"lo12", 0,
2517 0, /* adr_type */
2518 0,
2519 0,
2520 BFD_RELOC_AARCH64_ADD_LO12,
2521 BFD_RELOC_AARCH64_LDST_LO12,
2522 0},
2523
2524 /* Higher 21 bits of pc-relative page offset: ADRP */
2525 {"pg_hi21", 1,
2526 0, /* adr_type */
2527 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2528 0,
2529 0,
2530 0,
2531 0},
2532
2533 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2534 {"pg_hi21_nc", 1,
2535 0, /* adr_type */
2536 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2537 0,
2538 0,
2539 0,
2540 0},
2541
2542 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2543 {"abs_g0", 0,
2544 0, /* adr_type */
2545 0,
2546 BFD_RELOC_AARCH64_MOVW_G0,
2547 0,
2548 0,
2549 0},
2550
2551 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2552 {"abs_g0_s", 0,
2553 0, /* adr_type */
2554 0,
2555 BFD_RELOC_AARCH64_MOVW_G0_S,
2556 0,
2557 0,
2558 0},
2559
2560 /* Less significant bits 0-15 of address/value: MOVK, no check */
2561 {"abs_g0_nc", 0,
2562 0, /* adr_type */
2563 0,
2564 BFD_RELOC_AARCH64_MOVW_G0_NC,
2565 0,
2566 0,
2567 0},
2568
2569 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2570 {"abs_g1", 0,
2571 0, /* adr_type */
2572 0,
2573 BFD_RELOC_AARCH64_MOVW_G1,
2574 0,
2575 0,
2576 0},
2577
2578 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2579 {"abs_g1_s", 0,
2580 0, /* adr_type */
2581 0,
2582 BFD_RELOC_AARCH64_MOVW_G1_S,
2583 0,
2584 0,
2585 0},
2586
2587 /* Less significant bits 16-31 of address/value: MOVK, no check */
2588 {"abs_g1_nc", 0,
2589 0, /* adr_type */
2590 0,
2591 BFD_RELOC_AARCH64_MOVW_G1_NC,
2592 0,
2593 0,
2594 0},
2595
2596 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2597 {"abs_g2", 0,
2598 0, /* adr_type */
2599 0,
2600 BFD_RELOC_AARCH64_MOVW_G2,
2601 0,
2602 0,
2603 0},
2604
2605 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2606 {"abs_g2_s", 0,
2607 0, /* adr_type */
2608 0,
2609 BFD_RELOC_AARCH64_MOVW_G2_S,
2610 0,
2611 0,
2612 0},
2613
2614 /* Less significant bits 32-47 of address/value: MOVK, no check */
2615 {"abs_g2_nc", 0,
2616 0, /* adr_type */
2617 0,
2618 BFD_RELOC_AARCH64_MOVW_G2_NC,
2619 0,
2620 0,
2621 0},
2622
2623 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2624 {"abs_g3", 0,
2625 0, /* adr_type */
2626 0,
2627 BFD_RELOC_AARCH64_MOVW_G3,
2628 0,
2629 0,
2630 0},
2631
2632 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2633 {"prel_g0", 1,
2634 0, /* adr_type */
2635 0,
2636 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2637 0,
2638 0,
2639 0},
2640
2641 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2642 {"prel_g0_nc", 1,
2643 0, /* adr_type */
2644 0,
2645 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2646 0,
2647 0,
2648 0},
2649
2650 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2651 {"prel_g1", 1,
2652 0, /* adr_type */
2653 0,
2654 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2655 0,
2656 0,
2657 0},
2658
2659 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2660 {"prel_g1_nc", 1,
2661 0, /* adr_type */
2662 0,
2663 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2664 0,
2665 0,
2666 0},
2667
2668 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2669 {"prel_g2", 1,
2670 0, /* adr_type */
2671 0,
2672 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2673 0,
2674 0,
2675 0},
2676
2677 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2678 {"prel_g2_nc", 1,
2679 0, /* adr_type */
2680 0,
2681 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2682 0,
2683 0,
2684 0},
2685
2686 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2687 {"prel_g3", 1,
2688 0, /* adr_type */
2689 0,
2690 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2691 0,
2692 0,
2693 0},
2694
2695 /* Get to the page containing GOT entry for a symbol. */
2696 {"got", 1,
2697 0, /* adr_type */
2698 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2699 0,
2700 0,
2701 0,
2702 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2703
2704 /* 12 bit offset into the page containing GOT entry for that symbol. */
2705 {"got_lo12", 0,
2706 0, /* adr_type */
2707 0,
2708 0,
2709 0,
2710 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2711 0},
2712
2713 /* 0-15 bits of address/value: MOVK, no check. */
2714 {"gotoff_g0_nc", 0,
2715 0, /* adr_type */
2716 0,
2717 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2718 0,
2719 0,
2720 0},
2721
2722 /* Most significant bits 16-31 of address/value: MOVZ. */
2723 {"gotoff_g1", 0,
2724 0, /* adr_type */
2725 0,
2726 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2727 0,
2728 0,
2729 0},
2730
2731 /* 15 bit offset into the page containing GOT entry for that symbol. */
2732 {"gotoff_lo15", 0,
2733 0, /* adr_type */
2734 0,
2735 0,
2736 0,
2737 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2738 0},
2739
2740 /* Get to the page containing GOT TLS entry for a symbol */
2741 {"gottprel_g0_nc", 0,
2742 0, /* adr_type */
2743 0,
2744 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2745 0,
2746 0,
2747 0},
2748
2749 /* Get to the page containing GOT TLS entry for a symbol */
2750 {"gottprel_g1", 0,
2751 0, /* adr_type */
2752 0,
2753 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2754 0,
2755 0,
2756 0},
2757
2758 /* Get to the page containing GOT TLS entry for a symbol */
2759 {"tlsgd", 0,
2760 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2761 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2762 0,
2763 0,
2764 0,
2765 0},
2766
2767 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2768 {"tlsgd_lo12", 0,
2769 0, /* adr_type */
2770 0,
2771 0,
2772 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2773 0,
2774 0},
2775
2776 /* Lower 16 bits of address/value: MOVK. */
2777 {"tlsgd_g0_nc", 0,
2778 0, /* adr_type */
2779 0,
2780 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2781 0,
2782 0,
2783 0},
2784
2785 /* Most significant bits 16-31 of address/value: MOVZ. */
2786 {"tlsgd_g1", 0,
2787 0, /* adr_type */
2788 0,
2789 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2790 0,
2791 0,
2792 0},
2793
2794 /* Get to the page containing GOT TLS entry for a symbol */
2795 {"tlsdesc", 0,
2796 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2797 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2798 0,
2799 0,
2800 0,
2801 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2802
2803 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2804 {"tlsdesc_lo12", 0,
2805 0, /* adr_type */
2806 0,
2807 0,
2808 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2809 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2810 0},
2811
2812 /* Get to the page containing GOT TLS entry for a symbol.
2813 As with GD, we allocate two consecutive GOT slots
2814 for the module index and module offset; the only difference
2815 from GD is that the module offset should be initialized to
2816 zero without any outstanding runtime relocation. */
2817 {"tlsldm", 0,
2818 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2819 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2820 0,
2821 0,
2822 0,
2823 0},
2824
2825 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2826 {"tlsldm_lo12_nc", 0,
2827 0, /* adr_type */
2828 0,
2829 0,
2830 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2831 0,
2832 0},
2833
2834 /* 12 bit offset into the module TLS base address. */
2835 {"dtprel_lo12", 0,
2836 0, /* adr_type */
2837 0,
2838 0,
2839 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2840 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2841 0},
2842
2843 /* Same as dtprel_lo12, no overflow check. */
2844 {"dtprel_lo12_nc", 0,
2845 0, /* adr_type */
2846 0,
2847 0,
2848 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2849 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2850 0},
2851
2852 /* bits[23:12] of offset to the module TLS base address. */
2853 {"dtprel_hi12", 0,
2854 0, /* adr_type */
2855 0,
2856 0,
2857 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2858 0,
2859 0},
2860
2861 /* bits[15:0] of offset to the module TLS base address. */
2862 {"dtprel_g0", 0,
2863 0, /* adr_type */
2864 0,
2865 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2866 0,
2867 0,
2868 0},
2869
2870 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2871 {"dtprel_g0_nc", 0,
2872 0, /* adr_type */
2873 0,
2874 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2875 0,
2876 0,
2877 0},
2878
2879 /* bits[31:16] of offset to the module TLS base address. */
2880 {"dtprel_g1", 0,
2881 0, /* adr_type */
2882 0,
2883 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2884 0,
2885 0,
2886 0},
2887
2888 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2889 {"dtprel_g1_nc", 0,
2890 0, /* adr_type */
2891 0,
2892 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2893 0,
2894 0,
2895 0},
2896
2897 /* bits[47:32] of offset to the module TLS base address. */
2898 {"dtprel_g2", 0,
2899 0, /* adr_type */
2900 0,
2901 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2902 0,
2903 0,
2904 0},
2905
2906 /* Lower 16 bit offset into GOT entry for a symbol */
2907 {"tlsdesc_off_g0_nc", 0,
2908 0, /* adr_type */
2909 0,
2910 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2911 0,
2912 0,
2913 0},
2914
2915 /* Higher 16 bit offset into GOT entry for a symbol */
2916 {"tlsdesc_off_g1", 0,
2917 0, /* adr_type */
2918 0,
2919 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2920 0,
2921 0,
2922 0},
2923
2924 /* Get to the page containing GOT TLS entry for a symbol */
2925 {"gottprel", 0,
2926 0, /* adr_type */
2927 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2928 0,
2929 0,
2930 0,
2931 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2932
2933 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2934 {"gottprel_lo12", 0,
2935 0, /* adr_type */
2936 0,
2937 0,
2938 0,
2939 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2940 0},
2941
2942 /* Get tp offset for a symbol. */
2943 {"tprel", 0,
2944 0, /* adr_type */
2945 0,
2946 0,
2947 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2948 0,
2949 0},
2950
2951 /* Get tp offset for a symbol. */
2952 {"tprel_lo12", 0,
2953 0, /* adr_type */
2954 0,
2955 0,
2956 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2957 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2958 0},
2959
2960 /* Get tp offset for a symbol. */
2961 {"tprel_hi12", 0,
2962 0, /* adr_type */
2963 0,
2964 0,
2965 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2966 0,
2967 0},
2968
2969 /* Get tp offset for a symbol. */
2970 {"tprel_lo12_nc", 0,
2971 0, /* adr_type */
2972 0,
2973 0,
2974 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2975 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2976 0},
2977
2978 /* Most significant bits 32-47 of address/value: MOVZ. */
2979 {"tprel_g2", 0,
2980 0, /* adr_type */
2981 0,
2982 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2983 0,
2984 0,
2985 0},
2986
2987 /* Most significant bits 16-31 of address/value: MOVZ. */
2988 {"tprel_g1", 0,
2989 0, /* adr_type */
2990 0,
2991 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2992 0,
2993 0,
2994 0},
2995
2996 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2997 {"tprel_g1_nc", 0,
2998 0, /* adr_type */
2999 0,
3000 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3001 0,
3002 0,
3003 0},
3004
3005 /* Most significant bits 0-15 of address/value: MOVZ. */
3006 {"tprel_g0", 0,
3007 0, /* adr_type */
3008 0,
3009 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3010 0,
3011 0,
3012 0},
3013
3014 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3015 {"tprel_g0_nc", 0,
3016 0, /* adr_type */
3017 0,
3018 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3019 0,
3020 0,
3021 0},
3022
3023 /* 15 bit offset from GOT entry to base address of GOT table. */
3024 {"gotpage_lo15", 0,
3025 0,
3026 0,
3027 0,
3028 0,
3029 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3030 0},
3031
3032 /* 14 bit offset from GOT entry to base address of GOT table. */
3033 {"gotpage_lo14", 0,
3034 0,
3035 0,
3036 0,
3037 0,
3038 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3039 0},
3040 };
3041
3042 /* Given the address of a pointer pointing to the textual name of a
3043 relocation as may appear in assembler source, attempt to find its
3044 details in reloc_table. The pointer will be updated to the character
3045 after the trailing colon. On failure, NULL will be returned;
3046 otherwise return the reloc_table_entry. */
3047
3048 static struct reloc_table_entry *
3049 find_reloc_table_entry (char **str)
3050 {
3051 unsigned int i;
3052 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3053 {
3054 int length = strlen (reloc_table[i].name);
3055
3056 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3057 && (*str)[length] == ':')
3058 {
3059 *str += (length + 1);
3060 return &reloc_table[i];
3061 }
3062 }
3063
3064 return NULL;
3065 }
3066
3067 /* Returns 0 if the relocation should never be forced,
3068 1 if the relocation must be forced, and -1 if either
3069 result is OK. */
3070
3071 static signed int
3072 aarch64_force_reloc (unsigned int type)
3073 {
3074 switch (type)
3075 {
3076 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
3077 /* Perform these "immediate" internal relocations
3078 even if the symbol is extern or weak. */
3079 return 0;
3080
3081 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
3082 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
3083 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
3084 /* Pseudo relocs that need to be fixed up according to
3085 ilp32_p. */
3086 return 0;
3087
3088 case BFD_RELOC_AARCH64_ADD_LO12:
3089 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
3090 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
3091 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
3092 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
3093 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
3094 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
3095 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
3096 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
3097 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
3098 case BFD_RELOC_AARCH64_LDST128_LO12:
3099 case BFD_RELOC_AARCH64_LDST16_LO12:
3100 case BFD_RELOC_AARCH64_LDST32_LO12:
3101 case BFD_RELOC_AARCH64_LDST64_LO12:
3102 case BFD_RELOC_AARCH64_LDST8_LO12:
3103 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
3104 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3105 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3106 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
3107 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
3108 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3109 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
3110 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
3111 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3112 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3113 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3114 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
3115 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
3116 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3117 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
3118 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3119 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3120 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
3121 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
3122 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
3123 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
3124 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
3125 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
3126 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
3127 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
3128 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
3129 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
3130 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
3131 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
3132 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
3133 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
3134 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
3135 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
3136 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
3137 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
3138 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
3139 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
3140 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
3141 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
3142 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
3143 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
3144 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
3145 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
3146 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
3147 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
3148 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
3149 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
3150 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
3151 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3152 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
3153 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3154 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
3155 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3156 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
3157 /* Always leave these relocations for the linker. */
3158 return 1;
3159
3160 default:
3161 return -1;
3162 }
3163 }
3164
3165 int
3166 aarch64_force_relocation (struct fix *fixp)
3167 {
3168 int res = aarch64_force_reloc (fixp->fx_r_type);
3169
3170 if (res == -1)
3171 return generic_force_reloc (fixp);
3172 return res;
3173 }
3174
3175 /* Mode argument to parse_shift and parse_shifter_operand. */
3176 enum parse_shift_mode
3177 {
3178 SHIFTED_NONE, /* no shifter allowed */
3179 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
3180 "#imm{,lsl #n}" */
3181 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
3182 "#imm" */
3183 SHIFTED_LSL, /* bare "lsl #n" */
3184 SHIFTED_MUL, /* bare "mul #n" */
3185 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
3186 SHIFTED_MUL_VL, /* "mul vl" */
3187 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
3188 };
3189
3190 /* Parse a <shift> operator on an AArch64 data processing instruction.
3191 Return TRUE on success; otherwise return FALSE. */
3192 static bool
3193 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3194 {
3195 const struct aarch64_name_value_pair *shift_op;
3196 enum aarch64_modifier_kind kind;
3197 expressionS exp;
3198 int exp_has_prefix;
3199 char *s = *str;
3200 char *p = s;
3201
3202 for (p = *str; ISALPHA (*p); p++)
3203 ;
3204
3205 if (p == *str)
3206 {
3207 set_syntax_error (_("shift expression expected"));
3208 return false;
3209 }
3210
3211 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3212
3213 if (shift_op == NULL)
3214 {
3215 set_syntax_error (_("shift operator expected"));
3216 return false;
3217 }
3218
3219 kind = aarch64_get_operand_modifier (shift_op);
3220
3221 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3222 {
3223 set_syntax_error (_("invalid use of 'MSL'"));
3224 return false;
3225 }
3226
3227 if (kind == AARCH64_MOD_MUL
3228 && mode != SHIFTED_MUL
3229 && mode != SHIFTED_MUL_VL)
3230 {
3231 set_syntax_error (_("invalid use of 'MUL'"));
3232 return false;
3233 }
3234
3235 switch (mode)
3236 {
3237 case SHIFTED_LOGIC_IMM:
3238 if (aarch64_extend_operator_p (kind))
3239 {
3240 set_syntax_error (_("extending shift is not permitted"));
3241 return false;
3242 }
3243 break;
3244
3245 case SHIFTED_ARITH_IMM:
3246 if (kind == AARCH64_MOD_ROR)
3247 {
3248 set_syntax_error (_("'ROR' shift is not permitted"));
3249 return false;
3250 }
3251 break;
3252
3253 case SHIFTED_LSL:
3254 if (kind != AARCH64_MOD_LSL)
3255 {
3256 set_syntax_error (_("only 'LSL' shift is permitted"));
3257 return false;
3258 }
3259 break;
3260
3261 case SHIFTED_MUL:
3262 if (kind != AARCH64_MOD_MUL)
3263 {
3264 set_syntax_error (_("only 'MUL' is permitted"));
3265 return false;
3266 }
3267 break;
3268
3269 case SHIFTED_MUL_VL:
3270 /* "MUL VL" consists of two separate tokens. Require the first
3271 token to be "MUL" and look for a following "VL". */
3272 if (kind == AARCH64_MOD_MUL)
3273 {
3274 skip_whitespace (p);
3275 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3276 {
3277 p += 2;
3278 kind = AARCH64_MOD_MUL_VL;
3279 break;
3280 }
3281 }
3282 set_syntax_error (_("only 'MUL VL' is permitted"));
3283 return false;
3284
3285 case SHIFTED_REG_OFFSET:
3286 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3287 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3288 {
3289 set_fatal_syntax_error
3290 (_("invalid shift for the register offset addressing mode"));
3291 return false;
3292 }
3293 break;
3294
3295 case SHIFTED_LSL_MSL:
3296 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3297 {
3298 set_syntax_error (_("invalid shift operator"));
3299 return false;
3300 }
3301 break;
3302
3303 default:
3304 abort ();
3305 }
3306
3307 /* Whitespace can appear here if the next thing is a bare digit. */
3308 skip_whitespace (p);
3309
3310 /* Parse shift amount. */
3311 exp_has_prefix = 0;
3312 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3313 exp.X_op = O_absent;
3314 else
3315 {
3316 if (is_immediate_prefix (*p))
3317 {
3318 p++;
3319 exp_has_prefix = 1;
3320 }
3321 (void) aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT,
3322 NORMAL_RESOLUTION);
3323 }
3324 if (kind == AARCH64_MOD_MUL_VL)
3325 /* For consistency, give MUL VL the same shift amount as an implicit
3326 MUL #1. */
3327 operand->shifter.amount = 1;
3328 else if (exp.X_op == O_absent)
3329 {
3330 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3331 {
3332 set_syntax_error (_("missing shift amount"));
3333 return false;
3334 }
3335 operand->shifter.amount = 0;
3336 }
3337 else if (exp.X_op != O_constant)
3338 {
3339 set_syntax_error (_("constant shift amount required"));
3340 return false;
3341 }
3342 /* For parsing purposes, MUL #n has no inherent range. The range
3343 depends on the operand and will be checked by operand-specific
3344 routines. */
3345 else if (kind != AARCH64_MOD_MUL
3346 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3347 {
3348 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3349 return false;
3350 }
3351 else
3352 {
3353 operand->shifter.amount = exp.X_add_number;
3354 operand->shifter.amount_present = 1;
3355 }
3356
3357 operand->shifter.operator_present = 1;
3358 operand->shifter.kind = kind;
3359
3360 *str = p;
3361 return true;
3362 }
3363
3364 /* Parse a <shifter_operand> for a data processing instruction:
3365
3366 #<immediate>
3367 #<immediate>, LSL #imm
3368
3369 Validation of immediate operands is deferred to md_apply_fix.
3370
3371 Return TRUE on success; otherwise return FALSE. */
3372
3373 static bool
3374 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3375 enum parse_shift_mode mode)
3376 {
3377 char *p;
3378
3379 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3380 return false;
3381
3382 p = *str;
3383
3384 /* Accept an immediate expression. */
3385 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
3386 REJECT_ABSENT, NORMAL_RESOLUTION))
3387 return false;
3388
3389 /* Accept optional LSL for arithmetic immediate values. */
3390 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3391 if (! parse_shift (&p, operand, SHIFTED_LSL))
3392 return false;
3393
3394 /* Do not accept any shifter for logical immediate values. */
3395 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3396 && parse_shift (&p, operand, mode))
3397 {
3398 set_syntax_error (_("unexpected shift operator"));
3399 return false;
3400 }
3401
3402 *str = p;
3403 return true;
3404 }
3405
3406 /* Parse a <shifter_operand> for a data processing instruction:
3407
3408 <Rm>
3409 <Rm>, <shift>
3410 #<immediate>
3411 #<immediate>, LSL #imm
3412
3413 where <shift> is handled by parse_shift above, and the last two
3414 cases are handled by the function above.
3415
3416 Validation of immediate operands is deferred to md_apply_fix.
3417
3418 Return TRUE on success; otherwise return FALSE. */
3419
3420 static bool
3421 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3422 enum parse_shift_mode mode)
3423 {
3424 const reg_entry *reg;
3425 aarch64_opnd_qualifier_t qualifier;
3426 enum aarch64_operand_class opd_class
3427 = aarch64_get_operand_class (operand->type);
3428
3429 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3430 if (reg)
3431 {
3432 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3433 {
3434 set_syntax_error (_("unexpected register in the immediate operand"));
3435 return false;
3436 }
3437
3438 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3439 {
3440 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3441 return false;
3442 }
3443
3444 operand->reg.regno = reg->number;
3445 operand->qualifier = qualifier;
3446
3447 /* Accept optional shift operation on register. */
3448 if (! skip_past_comma (str))
3449 return true;
3450
3451 if (! parse_shift (str, operand, mode))
3452 return false;
3453
3454 return true;
3455 }
3456 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3457 {
3458 set_syntax_error
3459 (_("integer register expected in the extended/shifted operand "
3460 "register"));
3461 return false;
3462 }
3463
3464 /* We have a shifted immediate variable. */
3465 return parse_shifter_operand_imm (str, operand, mode);
3466 }
3467
3468 /* Return TRUE on success; return FALSE otherwise. */
3469
3470 static bool
3471 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3472 enum parse_shift_mode mode)
3473 {
3474 char *p = *str;
3475
3476 /* Determine if we have the sequence of characters #: or just :
3477 coming next. If we do, then we check for a :rello: relocation
3478 modifier. If we don't, punt the whole lot to
3479 parse_shifter_operand. */
3480
3481 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3482 {
3483 struct reloc_table_entry *entry;
3484
3485 if (p[0] == '#')
3486 p += 2;
3487 else
3488 p++;
3489 *str = p;
3490
3491 /* Try to parse a relocation. Anything else is an error. */
3492 if (!(entry = find_reloc_table_entry (str)))
3493 {
3494 set_syntax_error (_("unknown relocation modifier"));
3495 return false;
3496 }
3497
3498 if (entry->add_type == 0)
3499 {
3500 set_syntax_error
3501 (_("this relocation modifier is not allowed on this instruction"));
3502 return false;
3503 }
3504
3505 /* Save str before we decompose it. */
3506 p = *str;
3507
3508 /* Next, we parse the expression. */
3509 if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
3510 REJECT_ABSENT,
3511 aarch64_force_reloc (entry->add_type) == 1))
3512 return false;
3513
3514 /* Record the relocation type (use the ADD variant here). */
3515 inst.reloc.type = entry->add_type;
3516 inst.reloc.pc_rel = entry->pc_rel;
3517
3518 /* If str is empty, we've reached the end; stop here. */
3519 if (**str == '\0')
3520 return true;
3521
3522 /* Otherwise, we have a shifted reloc modifier, so rewind to
3523 recover the variable name and continue parsing for the shifter. */
3524 *str = p;
3525 return parse_shifter_operand_imm (str, operand, mode);
3526 }
3527
3528 return parse_shifter_operand (str, operand, mode);
3529 }
3530
3531 /* Parse all forms of an address expression. Information is written
3532 to *OPERAND and/or inst.reloc.
3533
3534 The A64 instruction set has the following addressing modes:
3535
3536 Offset
3537 [base] // in SIMD ld/st structure
3538 [base{,#0}] // in ld/st exclusive
3539 [base{,#imm}]
3540 [base,Xm{,LSL #imm}]
3541 [base,Xm,SXTX {#imm}]
3542 [base,Wm,(S|U)XTW {#imm}]
3543 Pre-indexed
3544 [base]! // in ldraa/ldrab exclusive
3545 [base,#imm]!
3546 Post-indexed
3547 [base],#imm
3548 [base],Xm // in SIMD ld/st structure
3549 PC-relative (literal)
3550 label
3551 SVE:
3552 [base,#imm,MUL VL]
3553 [base,Zm.D{,LSL #imm}]
3554 [base,Zm.S,(S|U)XTW {#imm}]
3555 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3556 [Zn.S,#imm]
3557 [Zn.D,#imm]
3558 [Zn.S{, Xm}]
3559 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3560 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3561 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3562
3563 (As a convenience, the notation "=immediate" is permitted in conjunction
3564 with the pc-relative literal load instructions to automatically place an
3565 immediate value or symbolic address in a nearby literal pool and generate
3566 a hidden label which references it.)
3567
3568 Upon a successful parsing, the address structure in *OPERAND will be
3569 filled in the following way:
3570
3571 .base_regno = <base>
3572 .offset.is_reg // 1 if the offset is a register
3573 .offset.imm = <imm>
3574 .offset.regno = <Rm>
3575
3576 For different addressing modes defined in the A64 ISA:
3577
3578 Offset
3579 .pcrel=0; .preind=1; .postind=0; .writeback=0
3580 Pre-indexed
3581 .pcrel=0; .preind=1; .postind=0; .writeback=1
3582 Post-indexed
3583 .pcrel=0; .preind=0; .postind=1; .writeback=1
3584 PC-relative (literal)
3585 .pcrel=1; .preind=1; .postind=0; .writeback=0
3586
3587 The shift/extension information, if any, will be stored in .shifter.
3588 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3589 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3590 corresponding register.
3591
3592 BASE_TYPE says which types of base register should be accepted and
3593 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3594 is the type of shifter that is allowed for immediate offsets,
3595 or SHIFTED_NONE if none.
3596
3597 In all other respects, it is the caller's responsibility to check
3598 for addressing modes not supported by the instruction, and to set
3599 inst.reloc.type. */
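/* Illustrative example of the "=immediate" convenience form described above:

       ldr x0, =0x1234567890abcdef

   places the constant in a nearby literal pool (flushed by .ltorg/.pool) and
   assembles the ldr as a pc-relative literal load of that pool entry.  */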
3600
3601 static bool
3602 parse_address_main (char **str, aarch64_opnd_info *operand,
3603 aarch64_opnd_qualifier_t *base_qualifier,
3604 aarch64_opnd_qualifier_t *offset_qualifier,
3605 aarch64_reg_type base_type, aarch64_reg_type offset_type,
3606 enum parse_shift_mode imm_shift_mode)
3607 {
3608 char *p = *str;
3609 const reg_entry *reg;
3610 expressionS *exp = &inst.reloc.exp;
3611
3612 *base_qualifier = AARCH64_OPND_QLF_NIL;
3613 *offset_qualifier = AARCH64_OPND_QLF_NIL;
3614 if (! skip_past_char (&p, '['))
3615 {
3616 /* =immediate or label. */
3617 operand->addr.pcrel = 1;
3618 operand->addr.preind = 1;
3619
3620 /* #:<reloc_op>:<symbol> */
3621 skip_past_char (&p, '#');
3622 if (skip_past_char (&p, ':'))
3623 {
3624 bfd_reloc_code_real_type ty;
3625 struct reloc_table_entry *entry;
3626
3627 /* Try to parse a relocation modifier. Anything else is
3628 an error. */
3629 entry = find_reloc_table_entry (&p);
3630 if (! entry)
3631 {
3632 set_syntax_error (_("unknown relocation modifier"));
3633 return false;
3634 }
3635
3636 switch (operand->type)
3637 {
3638 case AARCH64_OPND_ADDR_PCREL21:
3639 /* adr */
3640 ty = entry->adr_type;
3641 break;
3642
3643 default:
3644 ty = entry->ld_literal_type;
3645 break;
3646 }
3647
3648 if (ty == 0)
3649 {
3650 set_syntax_error
3651 (_("this relocation modifier is not allowed on this "
3652 "instruction"));
3653 return false;
3654 }
3655
3656 /* #:<reloc_op>: */
3657 if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
3658 aarch64_force_reloc (entry->add_type) == 1))
3659 {
3660 set_syntax_error (_("invalid relocation expression"));
3661 return false;
3662 }
3663 /* #:<reloc_op>:<expr> */
3664 /* Record the relocation type. */
3665 inst.reloc.type = ty;
3666 inst.reloc.pc_rel = entry->pc_rel;
3667 }
3668 else
3669 {
3670 if (skip_past_char (&p, '='))
3671 /* =immediate; need to generate the literal in the literal pool. */
3672 inst.gen_lit_pool = 1;
3673
3674 if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
3675 NORMAL_RESOLUTION))
3676 {
3677 set_syntax_error (_("invalid address"));
3678 return false;
3679 }
3680 }
3681
3682 *str = p;
3683 return true;
3684 }
3685
3686 /* [ */
3687
3688 reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3689 if (!reg || !aarch64_check_reg_type (reg, base_type))
3690 {
3691 set_syntax_error (_(get_reg_expected_msg (base_type)));
3692 return false;
3693 }
3694 operand->addr.base_regno = reg->number;
3695
3696 /* [Xn */
3697 if (skip_past_comma (&p))
3698 {
3699 /* [Xn, */
3700 operand->addr.preind = 1;
3701
3702 reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3703 if (reg)
3704 {
3705 if (!aarch64_check_reg_type (reg, offset_type))
3706 {
3707 set_syntax_error (_(get_reg_expected_msg (offset_type)));
3708 return false;
3709 }
3710
3711 /* [Xn,Rm */
3712 operand->addr.offset.regno = reg->number;
3713 operand->addr.offset.is_reg = 1;
3714 /* Shifted index. */
3715 if (skip_past_comma (&p))
3716 {
3717 /* [Xn,Rm, */
3718 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3719 /* Use the diagnostics set in parse_shift, so do not set a new
3720 error message here. */
3721 return false;
3722 }
3723 /* We only accept:
3724 [base,Xm] # For vector plus scalar SVE2 indexing.
3725 [base,Xm{,LSL #imm}]
3726 [base,Xm,SXTX {#imm}]
3727 [base,Wm,(S|U)XTW {#imm}] */
3728 if (operand->shifter.kind == AARCH64_MOD_NONE
3729 || operand->shifter.kind == AARCH64_MOD_LSL
3730 || operand->shifter.kind == AARCH64_MOD_SXTX)
3731 {
3732 if (*offset_qualifier == AARCH64_OPND_QLF_W)
3733 {
3734 set_syntax_error (_("invalid use of 32-bit register offset"));
3735 return false;
3736 }
3737 if (aarch64_get_qualifier_esize (*base_qualifier)
3738 != aarch64_get_qualifier_esize (*offset_qualifier)
3739 && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
3740 || *base_qualifier != AARCH64_OPND_QLF_S_S
3741 || *offset_qualifier != AARCH64_OPND_QLF_X))
3742 {
3743 set_syntax_error (_("offset has different size from base"));
3744 return false;
3745 }
3746 }
3747 else if (*offset_qualifier == AARCH64_OPND_QLF_X)
3748 {
3749 set_syntax_error (_("invalid use of 64-bit register offset"));
3750 return false;
3751 }
3752 }
3753 else
3754 {
3755 /* [Xn,#:<reloc_op>:<symbol> */
3756 skip_past_char (&p, '#');
3757 if (skip_past_char (&p, ':'))
3758 {
3759 struct reloc_table_entry *entry;
3760
3761 /* Try to parse a relocation modifier. Anything else is
3762 an error. */
3763 if (!(entry = find_reloc_table_entry (&p)))
3764 {
3765 set_syntax_error (_("unknown relocation modifier"));
3766 return false;
3767 }
3768
3769 if (entry->ldst_type == 0)
3770 {
3771 set_syntax_error
3772 (_("this relocation modifier is not allowed on this "
3773 "instruction"));
3774 return false;
3775 }
3776
3777 /* [Xn,#:<reloc_op>: */
3778 /* We now have the group relocation table entry corresponding to
3779 the name in the assembler source. Next, we parse the
3780 expression. */
3781 if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
3782 aarch64_force_reloc (entry->add_type) == 1))
3783 {
3784 set_syntax_error (_("invalid relocation expression"));
3785 return false;
3786 }
3787
3788 /* [Xn,#:<reloc_op>:<expr> */
3789 /* Record the load/store relocation type. */
3790 inst.reloc.type = entry->ldst_type;
3791 inst.reloc.pc_rel = entry->pc_rel;
3792 }
3793 else
3794 {
3795 if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
3796 NORMAL_RESOLUTION))
3797 {
3798 set_syntax_error (_("invalid expression in the address"));
3799 return false;
3800 }
3801 /* [Xn,<expr> */
3802 if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
3803 /* [Xn,<expr>,<shifter> */
3804 if (! parse_shift (&p, operand, imm_shift_mode))
3805 return false;
3806 }
3807 }
3808 }
3809
3810 if (! skip_past_char (&p, ']'))
3811 {
3812 set_syntax_error (_("']' expected"));
3813 return false;
3814 }
3815
3816 if (skip_past_char (&p, '!'))
3817 {
3818 if (operand->addr.preind && operand->addr.offset.is_reg)
3819 {
3820 set_syntax_error (_("register offset not allowed in pre-indexed "
3821 "addressing mode"));
3822 return false;
3823 }
3824 /* [Xn]! */
3825 operand->addr.writeback = 1;
3826 }
3827 else if (skip_past_comma (&p))
3828 {
3829 /* [Xn], */
3830 operand->addr.postind = 1;
3831 operand->addr.writeback = 1;
3832
3833 if (operand->addr.preind)
3834 {
3835 set_syntax_error (_("cannot combine pre- and post-indexing"));
3836 return false;
3837 }
3838
3839 reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
3840 if (reg)
3841 {
3842 /* [Xn],Xm */
3843 if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3844 {
3845 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3846 return false;
3847 }
3848
3849 operand->addr.offset.regno = reg->number;
3850 operand->addr.offset.is_reg = 1;
3851 }
3852 else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
3853 NORMAL_RESOLUTION))
3854 {
3855 /* [Xn],#expr */
3856 set_syntax_error (_("invalid expression in the address"));
3857 return false;
3858 }
3859 }
3860
3861 /* If at this point neither .preind nor .postind is set, we have a
3862 bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
3863 ldrab, and accept [Rn] as a shorthand for [Rn,#0].
3864 For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
3865 [Zn.<T>, xzr]. */
3866 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3867 {
3868 if (operand->addr.writeback)
3869 {
3870 if (operand->type == AARCH64_OPND_ADDR_SIMM10)
3871 {
3872 /* Accept [Rn]! as a shorthand for [Rn,#0]! */
3873 operand->addr.offset.is_reg = 0;
3874 operand->addr.offset.imm = 0;
3875 operand->addr.preind = 1;
3876 }
3877 else
3878 {
3879 /* Reject [Rn]! */
3880 set_syntax_error (_("missing offset in the pre-indexed address"));
3881 return false;
3882 }
3883 }
3884 else
3885 {
3886 operand->addr.preind = 1;
3887 if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
3888 {
3889 operand->addr.offset.is_reg = 1;
3890 operand->addr.offset.regno = REG_ZR;
3891 *offset_qualifier = AARCH64_OPND_QLF_X;
3892 }
3893 else
3894 {
3895 inst.reloc.exp.X_op = O_constant;
3896 inst.reloc.exp.X_add_number = 0;
3897 }
3898 }
3899 }
3900
3901 *str = p;
3902 return true;
3903 }
3904
3905 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3906 on success. */
3907 static bool
3908 parse_address (char **str, aarch64_opnd_info *operand)
3909 {
3910 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3911 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3912 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3913 }
3914
3915 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3916 The arguments have the same meaning as for parse_address_main.
3917 Return TRUE on success. */
3918 static bool
3919 parse_sve_address (char **str, aarch64_opnd_info *operand,
3920 aarch64_opnd_qualifier_t *base_qualifier,
3921 aarch64_opnd_qualifier_t *offset_qualifier)
3922 {
3923 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3924 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3925 SHIFTED_MUL_VL);
3926 }
3927
3928 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3929 Return TRUE on success; otherwise return FALSE. */
3930 static bool
3931 parse_half (char **str, int *internal_fixup_p)
3932 {
3933 char *p = *str;
3934
3935 skip_past_char (&p, '#');
3936
3937 gas_assert (internal_fixup_p);
3938 *internal_fixup_p = 0;
3939
3940 if (*p == ':')
3941 {
3942 struct reloc_table_entry *entry;
3943
3944 /* Try to parse a relocation. Anything else is an error. */
3945 ++p;
3946
3947 if (!(entry = find_reloc_table_entry (&p)))
3948 {
3949 set_syntax_error (_("unknown relocation modifier"));
3950 return false;
3951 }
3952
3953 if (entry->movw_type == 0)
3954 {
3955 set_syntax_error
3956 (_("this relocation modifier is not allowed on this instruction"));
3957 return false;
3958 }
3959
3960 inst.reloc.type = entry->movw_type;
3961 }
3962 else
3963 *internal_fixup_p = 1;
3964
3965 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
3966 aarch64_force_reloc (inst.reloc.type) == 1))
3967 return false;
3968
3969 *str = p;
3970 return true;
3971 }
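/* For illustration, the movw relocation modifiers handled above let an
   absolute 64-bit address be built piecewise (an assumed, typical pattern):

       movz x0, #:abs_g3:sym
       movk x0, #:abs_g2_nc:sym
       movk x0, #:abs_g1_nc:sym
       movk x0, #:abs_g0_nc:sym  */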
3972
3973 /* Parse an operand for an ADRP instruction:
3974 ADRP <Xd>, <label>
3975 Return TRUE on success; otherwise return FALSE. */
3976
3977 static bool
3978 parse_adrp (char **str)
3979 {
3980 char *p;
3981
3982 p = *str;
3983 if (*p == ':')
3984 {
3985 struct reloc_table_entry *entry;
3986
3987 /* Try to parse a relocation. Anything else is an error. */
3988 ++p;
3989 if (!(entry = find_reloc_table_entry (&p)))
3990 {
3991 set_syntax_error (_("unknown relocation modifier"));
3992 return false;
3993 }
3994
3995 if (entry->adrp_type == 0)
3996 {
3997 set_syntax_error
3998 (_("this relocation modifier is not allowed on this instruction"));
3999 return false;
4000 }
4001
4002 inst.reloc.type = entry->adrp_type;
4003 }
4004 else
4005 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4006
4007 inst.reloc.pc_rel = 1;
4008 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
4009 aarch64_force_reloc (inst.reloc.type) == 1))
4010 return false;
4011 *str = p;
4012 return true;
4013 }
4014
4015 /* Miscellaneous. */
4016
4017 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4018 of SIZE tokens in which index I gives the token for field value I,
4019 or is null if field value I is invalid. REG_TYPE says which register
4020 names should be treated as registers rather than as symbolic immediates.
4021
4022 Return true on success, moving *STR past the operand and storing the
4023 field value in *VAL. */
4024
4025 static int
4026 parse_enum_string (char **str, int64_t *val, const char *const *array,
4027 size_t size, aarch64_reg_type reg_type)
4028 {
4029 expressionS exp;
4030 char *p, *q;
4031 size_t i;
4032
4033 /* Match C-like tokens. */
4034 p = q = *str;
4035 while (ISALNUM (*q))
4036 q++;
4037
4038 for (i = 0; i < size; ++i)
4039 if (array[i]
4040 && strncasecmp (array[i], p, q - p) == 0
4041 && array[i][q - p] == 0)
4042 {
4043 *val = i;
4044 *str = q;
4045 return true;
4046 }
4047
4048 if (!parse_immediate_expression (&p, &exp, reg_type))
4049 return false;
4050
4051 if (exp.X_op == O_constant
4052 && (uint64_t) exp.X_add_number < size)
4053 {
4054 *val = exp.X_add_number;
4055 *str = p;
4056 return true;
4057 }
4058
4059 /* Use the default error for this operand. */
4060 return false;
4061 }
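
/* For example, with an ARRAY whose first entries were { "pow2", "vl1",
   "vl2", ... } (hypothetical ordering; the SVE pattern names are one
   user of this helper), "vl1" would store 1 in *VAL, and a plain
   immediate such as "#3" below SIZE would store 3.  */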
4062
4063 /* Parse an option for a preload instruction. Returns the encoding for the
4064 option, or PARSE_FAIL. */
4065
4066 static int
4067 parse_pldop (char **str)
4068 {
4069 char *p, *q;
4070 const struct aarch64_name_value_pair *o;
4071
4072 p = q = *str;
4073 while (ISALNUM (*q))
4074 q++;
4075
4076 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4077 if (!o)
4078 return PARSE_FAIL;
4079
4080 *str = q;
4081 return o->value;
4082 }
4083
4084 /* Parse an option for a barrier instruction. Returns the encoding for the
4085 option, or PARSE_FAIL. */
4086
4087 static int
4088 parse_barrier (char **str)
4089 {
4090 char *p, *q;
4091 const struct aarch64_name_value_pair *o;
4092
4093 p = q = *str;
4094 while (ISALPHA (*q))
4095 q++;
4096
4097 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4098 if (!o)
4099 return PARSE_FAIL;
4100
4101 *str = q;
4102 return o->value;
4103 }
4104
4105 /* Parse an operand for a PSB/TSB barrier. Set *HINT_OPT to the hint-option record
4106 and return 0 if successful. Otherwise return PARSE_FAIL. */
4107
4108 static int
4109 parse_barrier_psb (char **str,
4110 const struct aarch64_name_value_pair ** hint_opt)
4111 {
4112 char *p, *q;
4113 const struct aarch64_name_value_pair *o;
4114
4115 p = q = *str;
4116 while (ISALPHA (*q))
4117 q++;
4118
4119 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4120 if (!o)
4121 {
4122 set_fatal_syntax_error
4123 ( _("unknown or missing option to PSB/TSB"));
4124 return PARSE_FAIL;
4125 }
4126
4127 if (o->value != 0x11)
4128 {
4129 /* PSB only accepts option name 'CSYNC'. */
4130 set_syntax_error
4131 (_("the specified option is not accepted for PSB/TSB"));
4132 return PARSE_FAIL;
4133 }
4134
4135 *str = q;
4136 *hint_opt = o;
4137 return 0;
4138 }
4139
4140 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4141 and return 0 if successful. Otherwise return PARSE_FAIL. */
4142
4143 static int
4144 parse_bti_operand (char **str,
4145 const struct aarch64_name_value_pair ** hint_opt)
4146 {
4147 char *p, *q;
4148 const struct aarch64_name_value_pair *o;
4149
4150 p = q = *str;
4151 while (ISALPHA (*q))
4152 q++;
4153
4154 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4155 if (!o)
4156 {
4157 set_fatal_syntax_error
4158 ( _("unknown option to BTI"));
4159 return PARSE_FAIL;
4160 }
4161
4162 switch (o->value)
4163 {
4164 /* Valid BTI operands. */
4165 case HINT_OPD_C:
4166 case HINT_OPD_J:
4167 case HINT_OPD_JC:
4168 break;
4169
4170 default:
4171 set_syntax_error
4172 (_("unknown option to BTI"));
4173 return PARSE_FAIL;
4174 }
4175
4176 *str = q;
4177 *hint_opt = o;
4178 return 0;
4179 }
4180
4181 /* Parse STR for a register of type REG_TYPE followed by '.' and a qualifier.
4182 On success the function returns the REG_ENTRY struct and sets *QUALIFIER
4183 to one of [bhsdq]; on failure it returns NULL. Format:
4184
4185 REG_TYPE.QUALIFIER
4186
4187 Side effect: on success *STR is updated to the current parse position.
4188 */
4189
4190 static const reg_entry *
4191 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4192 aarch64_opnd_qualifier_t *qualifier)
4193 {
4194 char *q;
4195
4196 reg_entry *reg = parse_reg (str);
4197 if (reg != NULL && reg->type == reg_type)
4198 {
4199 if (!skip_past_char (str, '.'))
4200 {
4201 set_syntax_error (_("missing ZA tile element size separator"));
4202 return NULL;
4203 }
4204
4205 q = *str;
4206 switch (TOLOWER (*q))
4207 {
4208 case 'b':
4209 *qualifier = AARCH64_OPND_QLF_S_B;
4210 break;
4211 case 'h':
4212 *qualifier = AARCH64_OPND_QLF_S_H;
4213 break;
4214 case 's':
4215 *qualifier = AARCH64_OPND_QLF_S_S;
4216 break;
4217 case 'd':
4218 *qualifier = AARCH64_OPND_QLF_S_D;
4219 break;
4220 case 'q':
4221 *qualifier = AARCH64_OPND_QLF_S_Q;
4222 break;
4223 default:
4224 return NULL;
4225 }
4226 q++;
4227
4228 *str = q;
4229 return reg;
4230 }
4231
4232 return NULL;
4233 }
4234
4235 /* Parse an SME ZA tile encoded in the <ZAda> assembler symbol.
4236 On success the tile element size is returned through *QUALIFIER.
4237
4238 Tiles have the format: za[0-9]\.[bhsd]
4239
4240 The function returns the <ZAda> register number or PARSE_FAIL.
4241 */
4242 static int
4243 parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
4244 {
4245 int regno;
4246 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZA, qualifier);
4247
4248 if (reg == NULL)
4249 return PARSE_FAIL;
4250 regno = reg->number;
4251
4252 switch (*qualifier)
4253 {
4254 case AARCH64_OPND_QLF_S_B:
4255 if (regno != 0x00)
4256 {
4257 set_syntax_error (_("invalid ZA tile register number, expected za0"));
4258 return PARSE_FAIL;
4259 }
4260 break;
4261 case AARCH64_OPND_QLF_S_H:
4262 if (regno > 0x01)
4263 {
4264 set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
4265 return PARSE_FAIL;
4266 }
4267 break;
4268 case AARCH64_OPND_QLF_S_S:
4269 if (regno > 0x03)
4270 {
4271 /* For the 32-bit variant the ZA tile name must be one of ZA0-ZA3. */
4272 set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
4273 return PARSE_FAIL;
4274 }
4275 break;
4276 case AARCH64_OPND_QLF_S_D:
4277 if (regno > 0x07)
4278 {
4279 /* For the 64-bit variant the ZA tile name must be one of ZA0-ZA7. */
4280 set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
4281 return PARSE_FAIL;
4282 }
4283 break;
4284 default:
4285 set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
4286 return PARSE_FAIL;
4287 }
4288
4289 return regno;
4290 }
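
/* For example (following the checks above):

     za0.b  -> regno 0, qualifier S_B (only za0 is valid for .b)
     za3.s  -> regno 3, qualifier S_S
     za5.s  -> rejected; .s tiles are restricted to za0-za3.  */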
4291
4292 /* Parse STR for an unsigned immediate (1-2 digits) in the format:
4293
4294 #<imm>
4295 <imm>
4296
4297 The function returns TRUE if an immediate was found, FALSE otherwise.
4298 */
4299 static bool
4300 parse_sme_immediate (char **str, int64_t *imm)
4301 {
4302 int64_t val;
4303 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4304 return false;
4305
4306 *imm = val;
4307 return true;
4308 }
4309
4310 /* Parse an index with a vector select register and an immediate:
4311
4312 [<Wv>, <imm>]
4313 [<Wv>, #<imm>]
4314 where <Wv> is in the W12-W15 range and the '#' before the immediate
4315 is optional.
4316
4317 The immediate itself is mandatory; range checking is left to the
4318 callers.
4319
4320 On success the function returns TRUE and populates
4321 *VECTOR_SELECT_REGISTER and *IMM.  */
4322 static bool
4323 parse_sme_za_hv_tiles_operand_index (char **str,
4324 int *vector_select_register,
4325 int64_t *imm)
4326 {
4327 const reg_entry *reg;
4328
4329 if (!skip_past_char (str, '['))
4330 {
4331 set_syntax_error (_("expected '['"));
4332 return false;
4333 }
4334
4335 /* Vector select register W12-W15 encoded in the 2-bit Rv field. */
4336 reg = parse_reg (str);
4337 if (reg == NULL || reg->type != REG_TYPE_R_32
4338 || reg->number < 12 || reg->number > 15)
4339 {
4340 set_syntax_error (_("expected vector select register W12-W15"));
4341 return false;
4342 }
4343 *vector_select_register = reg->number;
4344
4345 if (!skip_past_char (str, ',')) /* Separator before the index offset immediate. */
4346 {
4347 set_syntax_error (_("expected ','"));
4348 return false;
4349 }
4350
4351 if (!parse_sme_immediate (str, imm))
4352 {
4353 set_syntax_error (_("index offset immediate expected"));
4354 return false;
4355 }
4356
4357 if (!skip_past_char (str, ']'))
4358 {
4359 set_syntax_error (_("expected ']'"));
4360 return false;
4361 }
4362
4363 return true;
4364 }
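
/* For example:

     [w12, #0]  -> *vector_select_register = 12, *imm = 0
     [w13, 3]   -> *vector_select_register = 13, *imm = 3
     [w11, #0]  -> rejected; the select register must be W12-W15.  */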
4365
4366 /* Parse an SME ZA horizontal or vertical vector access to a tile.
4367 The function extracts from STR the <HV> tile vector orientation into
4368 *SLICE_INDICATOR (0 for horizontal, 1 for vertical), the <Wv> select
4369 register into *VECTOR_SELECT_REGISTER together with the corresponding
4370 immediate into *IMM, and the element size into *QUALIFIER.
4371
4372 Field format examples:
4373
4374 ZA0<HV>.B[<Wv>, #<imm>]
4375 <ZAn><HV>.H[<Wv>, #<imm>]
4376 <ZAn><HV>.S[<Wv>, #<imm>]
4377 <ZAn><HV>.D[<Wv>, #<imm>]
4378 <ZAn><HV>.Q[<Wv>, #<imm>]
4379
4380 Function returns <ZAda> register number or PARSE_FAIL.
4381 */
4382 static int
4383 parse_sme_za_hv_tiles_operand (char **str,
4384 enum sme_hv_slice *slice_indicator,
4385 int *vector_select_register,
4386 int *imm,
4387 aarch64_opnd_qualifier_t *qualifier)
4388 {
4389 char *qh, *qv;
4390 int regno;
4391 int regno_limit;
4392 int64_t imm_limit;
4393 int64_t imm_value;
4394 const reg_entry *reg;
4395
4396 qh = qv = *str;
4397 if ((reg = parse_reg_with_qual (&qh, REG_TYPE_ZAH, qualifier)) != NULL)
4398 {
4399 *slice_indicator = HV_horizontal;
4400 *str = qh;
4401 }
4402 else if ((reg = parse_reg_with_qual (&qv, REG_TYPE_ZAV, qualifier)) != NULL)
4403 {
4404 *slice_indicator = HV_vertical;
4405 *str = qv;
4406 }
4407 else
4408 return PARSE_FAIL;
4409 regno = reg->number;
4410
4411 switch (*qualifier)
4412 {
4413 case AARCH64_OPND_QLF_S_B:
4414 regno_limit = 0;
4415 imm_limit = 15;
4416 break;
4417 case AARCH64_OPND_QLF_S_H:
4418 regno_limit = 1;
4419 imm_limit = 7;
4420 break;
4421 case AARCH64_OPND_QLF_S_S:
4422 regno_limit = 3;
4423 imm_limit = 3;
4424 break;
4425 case AARCH64_OPND_QLF_S_D:
4426 regno_limit = 7;
4427 imm_limit = 1;
4428 break;
4429 case AARCH64_OPND_QLF_S_Q:
4430 regno_limit = 15;
4431 imm_limit = 0;
4432 break;
4433 default:
4434 set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
4435 return PARSE_FAIL;
4436 }
4437
4438 /* Check if destination register ZA tile vector is in range for given
4439 instruction variant. */
4440 if (regno < 0 || regno > regno_limit)
4441 {
4442 set_syntax_error (_("ZA tile vector out of range"));
4443 return PARSE_FAIL;
4444 }
4445
4446 if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
4447 &imm_value))
4448 return PARSE_FAIL;
4449
4450 /* Check if optional index offset is in the range for instruction
4451 variant. */
4452 if (imm_value < 0 || imm_value > imm_limit)
4453 {
4454 set_syntax_error (_("index offset out of range"));
4455 return PARSE_FAIL;
4456 }
4457
4458 *imm = imm_value;
4459
4460 return regno;
4461 }
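
/* For example (using the limits from the table above):

     za1v.h[w13, #5]  -> HV_vertical, regno 1, Wv = 13, imm = 5
     za0h.q[w12, #0]  -> HV_horizontal, regno 0, Wv = 12, imm = 0
     za2v.h[w13, #5]  -> rejected; .h tiles are restricted to za0-za1.  */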
4462
4463
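/* Like parse_sme_za_hv_tiles_operand, but with the whole operand wrapped
   in curly braces, e.g. { <ZAn><HV>.<T>[<Wv>, #<imm>] }.  Returns the
   tile register number or PARSE_FAIL.  */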
4464 static int
4465 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4466 enum sme_hv_slice *slice_indicator,
4467 int *vector_select_register,
4468 int *imm,
4469 aarch64_opnd_qualifier_t *qualifier)
4470 {
4471 int regno;
4472
4473 if (!skip_past_char (str, '{'))
4474 {
4475 set_syntax_error (_("expected '{'"));
4476 return PARSE_FAIL;
4477 }
4478
4479 regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4480 vector_select_register, imm,
4481 qualifier);
4482
4483 if (regno == PARSE_FAIL)
4484 return PARSE_FAIL;
4485
4486 if (!skip_past_char (str, '}'))
4487 {
4488 set_syntax_error (_("expected '}'"));
4489 return PARSE_FAIL;
4490 }
4491
4492 return regno;
4493 }
4494
4495 /* Parse list of up to eight 64-bit element tile names separated by commas in
4496 SME's ZERO instruction:
4497
4498 ZERO { <mask> }
4499
4500 Function returns <mask>:
4501
4502 an 8-bit mask of the 64-bit element tiles named ZA0.D to ZA7.D.
4503 */
4504 static int
4505 parse_sme_zero_mask(char **str)
4506 {
4507 char *q;
4508 int mask;
4509 aarch64_opnd_qualifier_t qualifier;
4510
4511 mask = 0x00;
4512 q = *str;
4513 do
4514 {
4515 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA, &qualifier);
4516 if (reg)
4517 {
4518 int regno = reg->number;
4519 if (qualifier == AARCH64_OPND_QLF_S_B && regno == 0)
4520 {
4521 /* { ZA0.B } is assembled as all-ones immediate. */
4522 mask = 0xff;
4523 }
4524 else if (qualifier == AARCH64_OPND_QLF_S_H && regno < 2)
4525 mask |= 0x55 << regno;
4526 else if (qualifier == AARCH64_OPND_QLF_S_S && regno < 4)
4527 mask |= 0x11 << regno;
4528 else if (qualifier == AARCH64_OPND_QLF_S_D && regno < 8)
4529 mask |= 0x01 << regno;
4530 else
4531 {
4532 set_syntax_error (_("wrong ZA tile element format"));
4533 return PARSE_FAIL;
4534 }
4535 continue;
4536 }
4537 else if (strncasecmp (q, "za", 2) == 0
4538 && !ISALNUM (q[2]))
4539 {
4540 /* { ZA } is assembled as all-ones immediate. */
4541 mask = 0xff;
4542 q += 2;
4543 continue;
4544 }
4545 else
4546 {
4547 set_syntax_error (_("wrong ZA tile element format"));
4548 return PARSE_FAIL;
4549 }
4550 }
4551 while (skip_past_char (&q, ','));
4552
4553 *str = q;
4554 return mask;
4555 }
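
/* Worked examples of the mask encoding above:

     zero {za0.d, za2.d, za5.d}  -> mask 0x25
     zero {za0.s, za1.s}         -> mask 0x33 (0x11 | 0x22)
     zero {za1.h}                -> mask 0xaa (0x55 << 1)
     zero {za} or zero {za0.b}   -> mask 0xff.  */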
4556
4557 /* Parse the curly-brace wrapped <mask> operand of the ZERO instruction:
4558
4559 ZERO { <mask> }
4560
4561 The function returns the value of the <mask> bit-field.
4562 */
4563 static int
4564 parse_sme_list_of_64bit_tiles (char **str)
4565 {
4566 int regno;
4567
4568 if (!skip_past_char (str, '{'))
4569 {
4570 set_syntax_error (_("expected '{'"));
4571 return PARSE_FAIL;
4572 }
4573
4574 /* Empty <mask> list is an all-zeros immediate. */
4575 if (!skip_past_char (str, '}'))
4576 {
4577 regno = parse_sme_zero_mask (str);
4578 if (regno == PARSE_FAIL)
4579 return PARSE_FAIL;
4580
4581 if (!skip_past_char (str, '}'))
4582 {
4583 set_syntax_error (_("expected '}'"));
4584 return PARSE_FAIL;
4585 }
4586 }
4587 else
4588 regno = 0x00;
4589
4590 return regno;
4591 }
4592
4593 /* Parse the ZA array operand used in e.g. the STR and LDR instructions.
4594 Operand format:
4595
4596 ZA[<Wv>, <imm>]
4597 ZA[<Wv>, #<imm>]
4598
4599 Function returns <Wv> or PARSE_FAIL.
4600 */
4601 static int
4602 parse_sme_za_array (char **str, int *imm)
4603 {
4604 char *p, *q;
4605 int regno;
4606 int64_t imm_value;
4607
4608 p = q = *str;
4609 while (ISALPHA (*q))
4610 q++;
4611
4612 if ((q - p != 2) || strncasecmp ("za", p, q - p) != 0)
4613 {
4614 set_syntax_error (_("expected ZA array"));
4615 return PARSE_FAIL;
4616 }
4617
4618 if (! parse_sme_za_hv_tiles_operand_index (&q, &regno, &imm_value))
4619 return PARSE_FAIL;
4620
4621 if (imm_value < 0 || imm_value > 15)
4622 {
4623 set_syntax_error (_("offset out of range"));
4624 return PARSE_FAIL;
4625 }
4626
4627 *imm = imm_value;
4628 *str = q;
4629 return regno;
4630 }
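
/* For example, "za[w12, #7]" returns 12 with *imm = 7, while an offset
   greater than 15 is rejected.  */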
4631
4632 /* Parse streaming mode operand for SMSTART and SMSTOP.
4633
4634 {SM | ZA}
4635
4636 Function returns 's' if SM or 'z' if ZA is parsed. Otherwise PARSE_FAIL.
4637 */
4638 static int
4639 parse_sme_sm_za (char **str)
4640 {
4641 char *p, *q;
4642
4643 p = q = *str;
4644 while (ISALPHA (*q))
4645 q++;
4646
4647 if ((q - p != 2)
4648 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4649 {
4650 set_syntax_error (_("expected SM or ZA operand"));
4651 return PARSE_FAIL;
4652 }
4653
4654 *str = q;
4655 return TOLOWER (p[0]);
4656 }
4657
4658 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4659 Returns the encoding for the option, or PARSE_FAIL.
4660
4661 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4662 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4663
4664 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4665 field, otherwise as a system register.
4666 */
4667
4668 static int
4669 parse_sys_reg (char **str, htab_t sys_regs,
4670 int imple_defined_p, int pstatefield_p,
4671 uint32_t* flags)
4672 {
4673 char *p, *q;
4674 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4675 const aarch64_sys_reg *o;
4676 int value;
4677
4678 p = buf;
4679 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4680 if (p < buf + (sizeof (buf) - 1))
4681 *p++ = TOLOWER (*q);
4682 *p = '\0';
4683
4684 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4685 valid system register. This is enforced by construction of the hash
4686 table. */
4687 if (p - buf != q - *str)
4688 return PARSE_FAIL;
4689
4690 o = str_hash_find (sys_regs, buf);
4691 if (!o)
4692 {
4693 if (!imple_defined_p)
4694 return PARSE_FAIL;
4695 else
4696 {
4697 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
4698 unsigned int op0, op1, cn, cm, op2;
4699
4700 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
4701 != 5)
4702 return PARSE_FAIL;
4703 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
4704 return PARSE_FAIL;
4705 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
4706 if (flags)
4707 *flags = 0;
4708 }
4709 }
4710 else
4711 {
4712 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
4713 as_bad (_("selected processor does not support PSTATE field "
4714 "name '%s'"), buf);
4715 if (!pstatefield_p
4716 && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
4717 o->value, o->flags, o->features))
4718 as_bad (_("selected processor does not support system register "
4719 "name '%s'"), buf);
4720 if (aarch64_sys_reg_deprecated_p (o->flags))
4721 as_warn (_("system register name '%s' is deprecated and may be "
4722 "removed in a future release"), buf);
4723 value = o->value;
4724 if (flags)
4725 *flags = o->flags;
4726 }
4727
4728 *str = q;
4729 return value;
4730 }
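
/* Worked example of the implementation-defined encoding above:

     s3_0_c2_c0_0  -> op0=3, op1=0, Cn=2, Cm=0, op2=0
                   -> value = (3 << 14) | (0 << 11) | (2 << 7) | (0 << 3) | 0
                            = 0xc100.  */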
4731
4732 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4733 for the option, or NULL. */
4734
4735 static const aarch64_sys_ins_reg *
4736 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4737 {
4738 char *p, *q;
4739 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4740 const aarch64_sys_ins_reg *o;
4741
4742 p = buf;
4743 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4744 if (p < buf + (sizeof (buf) - 1))
4745 *p++ = TOLOWER (*q);
4746 *p = '\0';
4747
4748 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4749 valid system register. This is enforced by construction of the hash
4750 table. */
4751 if (p - buf != q - *str)
4752 return NULL;
4753
4754 o = str_hash_find (sys_ins_regs, buf);
4755 if (!o)
4756 return NULL;
4757
4758 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4759 o->name, o->value, o->flags, 0))
4760 as_bad (_("selected processor does not support system register "
4761 "name '%s'"), buf);
4762 if (aarch64_sys_reg_deprecated_p (o->flags))
4763 as_warn (_("system register name '%s' is deprecated and may be "
4764 "removed in a future release"), buf);
4765
4766 *str = q;
4767 return o;
4768 }
4769 \f
4770 #define po_char_or_fail(chr) do { \
4771 if (! skip_past_char (&str, chr)) \
4772 goto failure; \
4773 } while (0)
4774
4775 #define po_reg_or_fail(regtype) do { \
4776 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
4777 if (val == PARSE_FAIL) \
4778 { \
4779 set_default_error (); \
4780 goto failure; \
4781 } \
4782 } while (0)
4783
4784 #define po_int_reg_or_fail(reg_type) do { \
4785 reg = aarch64_reg_parse_32_64 (&str, &qualifier); \
4786 if (!reg || !aarch64_check_reg_type (reg, reg_type)) \
4787 { \
4788 set_default_error (); \
4789 goto failure; \
4790 } \
4791 info->reg.regno = reg->number; \
4792 info->qualifier = qualifier; \
4793 } while (0)
4794
4795 #define po_imm_nc_or_fail() do { \
4796 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4797 goto failure; \
4798 } while (0)
4799
4800 #define po_imm_or_fail(min, max) do { \
4801 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4802 goto failure; \
4803 if (val < min || val > max) \
4804 { \
4805 set_fatal_syntax_error (_("immediate value out of range "\
4806 #min " to "#max)); \
4807 goto failure; \
4808 } \
4809 } while (0)
4810
4811 #define po_enum_or_fail(array) do { \
4812 if (!parse_enum_string (&str, &val, array, \
4813 ARRAY_SIZE (array), imm_reg_type)) \
4814 goto failure; \
4815 } while (0)
4816
4817 #define po_misc_or_fail(expr) do { \
4818 if (!expr) \
4819 goto failure; \
4820 } while (0)
4821 \f
4822 /* Encode the 12-bit imm field of Add/sub immediate.  */
4823 static inline uint32_t
4824 encode_addsub_imm (uint32_t imm)
4825 {
4826 return imm << 10;
4827 }
4828
4829 /* Encode the shift amount field of Add/sub immediate.  */
4830 static inline uint32_t
4831 encode_addsub_imm_shift_amount (uint32_t cnt)
4832 {
4833 return cnt << 22;
4834 }
4835
4836
4837 /* Encode the imm field of the Adr instruction.  */
4838 static inline uint32_t
4839 encode_adr_imm (uint32_t imm)
4840 {
4841 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
4842 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
4843 }
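
/* For example, encode_adr_imm (0x5) places bits [1:0] (value 0x1) at
   [30:29] and bits [20:2] (value 0x1) at [23:5]:

     ((0x5 & 0x3) << 29) | ((0x5 & 0x1ffffc) << 3)
       = 0x20000000 | 0x20 = 0x20000020.  */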
4844
4845 /* Encode the immediate field of Move wide immediate.  */
4846 static inline uint32_t
4847 encode_movw_imm (uint32_t imm)
4848 {
4849 return imm << 5;
4850 }
4851
4852 /* Encode the 26-bit offset of unconditional branch.  */
4853 static inline uint32_t
4854 encode_branch_ofs_26 (uint32_t ofs)
4855 {
4856 return ofs & ((1 << 26) - 1);
4857 }
4858
4859 /* Encode the 19-bit offset of conditional branch and compare & branch.  */
4860 static inline uint32_t
4861 encode_cond_branch_ofs_19 (uint32_t ofs)
4862 {
4863 return (ofs & ((1 << 19) - 1)) << 5;
4864 }
4865
4866 /* Encode the 19-bit offset of ld literal.  */
4867 static inline uint32_t
4868 encode_ld_lit_ofs_19 (uint32_t ofs)
4869 {
4870 return (ofs & ((1 << 19) - 1)) << 5;
4871 }
4872
4873 /* Encode the 14-bit offset of test & branch. */
4874 static inline uint32_t
4875 encode_tst_branch_ofs_14 (uint32_t ofs)
4876 {
4877 return (ofs & ((1 << 14) - 1)) << 5;
4878 }
4879
4880 /* Encode the 16-bit imm field of svc/hvc/smc. */
4881 static inline uint32_t
4882 encode_svc_imm (uint32_t imm)
4883 {
4884 return imm << 5;
4885 }
4886
4887 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
4888 static inline uint32_t
4889 reencode_addsub_switch_add_sub (uint32_t opcode)
4890 {
4891 return opcode ^ (1 << 30);
4892 }
4893
4894 static inline uint32_t
4895 reencode_movzn_to_movz (uint32_t opcode)
4896 {
4897 return opcode | (1 << 30);
4898 }
4899
4900 static inline uint32_t
4901 reencode_movzn_to_movn (uint32_t opcode)
4902 {
4903 return opcode & ~(1 << 30);
4904 }
4905
4906 /* Overall per-instruction processing. */
4907
4908 /* We need to be able to fix up arbitrary expressions in some statements.
4909 This is so that we can handle symbols that are an arbitrary distance from
4910 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4911 which returns part of an address in a form which will be valid for
4912 a data instruction. We do this by pushing the expression into a symbol
4913 in the expr_section, and creating a fix for that. */
4914
4915 static fixS *
4916 fix_new_aarch64 (fragS * frag,
4917 int where,
4918 short int size,
4919 expressionS * exp,
4920 int pc_rel,
4921 int reloc)
4922 {
4923 fixS *new_fix;
4924
4925 switch (exp->X_op)
4926 {
4927 case O_constant:
4928 case O_symbol:
4929 case O_add:
4930 case O_subtract:
4931 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4932 break;
4933
4934 default:
4935 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4936 pc_rel, reloc);
4937 break;
4938 }
4939 return new_fix;
4940 }
4941 \f
4942 /* Diagnostics on operands errors. */
4943
4944 /* By default, output verbose error messages.
4945 Verbose error messages can be disabled with -mno-verbose-error. */
4946 static int verbose_error_p = 1;
4947
4948 #ifdef DEBUG_AARCH64
4949 /* N.B. this is only for the purpose of debugging. */
4950 const char* operand_mismatch_kind_names[] =
4951 {
4952 "AARCH64_OPDE_NIL",
4953 "AARCH64_OPDE_RECOVERABLE",
4954 "AARCH64_OPDE_SYNTAX_ERROR",
4955 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
4956 "AARCH64_OPDE_INVALID_VARIANT",
4957 "AARCH64_OPDE_OUT_OF_RANGE",
4958 "AARCH64_OPDE_UNALIGNED",
4959 "AARCH64_OPDE_REG_LIST",
4960 "AARCH64_OPDE_OTHER_ERROR",
4961 };
4962 #endif /* DEBUG_AARCH64 */
4963
4964 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4965
4966 When multiple errors of different kinds are found in the same assembly
4967 line, only the error of the highest severity will be picked up for
4968 issuing the diagnostics. */
4969
4970 static inline bool
4971 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
4972 enum aarch64_operand_error_kind rhs)
4973 {
4974 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
4975 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
4976 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
4977 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
4978 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
4979 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
4980 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
4981 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
4982 return lhs > rhs;
4983 }
4984
4985 /* Helper routine to get the mnemonic name from the assembly instruction
4986 line; it should only be called for diagnostic purposes, as a string
4987 copy operation is involved, which may affect runtime performance if
4988 used elsewhere. */
4989
4990 static const char*
4991 get_mnemonic_name (const char *str)
4992 {
4993 static char mnemonic[32];
4994 char *ptr;
4995
4996 /* Get the first 31 bytes and assume that the full name is included. */
4997 strncpy (mnemonic, str, 31);
4998 mnemonic[31] = '\0';
4999
5000 /* Scan up to the end of the mnemonic, which must end in white space,
5001 '.', or end of string. */
5002 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
5003 ;
5004
5005 *ptr = '\0';
5006
5007 /* Append '...' to the truncated long name. */
5008 if (ptr - mnemonic == 31)
5009 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
5010
5011 return mnemonic;
5012 }
5013
5014 static void
5015 reset_aarch64_instruction (aarch64_instruction *instruction)
5016 {
5017 memset (instruction, '\0', sizeof (aarch64_instruction));
5018 instruction->reloc.type = BFD_RELOC_UNUSED;
5019 }
5020
5021 /* Data structures storing one user error in the assembly code related to
5022 operands. */
5023
5024 struct operand_error_record
5025 {
5026 const aarch64_opcode *opcode;
5027 aarch64_operand_error detail;
5028 struct operand_error_record *next;
5029 };
5030
5031 typedef struct operand_error_record operand_error_record;
5032
5033 struct operand_errors
5034 {
5035 operand_error_record *head;
5036 operand_error_record *tail;
5037 };
5038
5039 typedef struct operand_errors operand_errors;
5040
5041 /* Top-level data structure reporting user errors for the current line of
5042 the assembly code.
5043 The way md_assemble works is that all opcodes sharing the same mnemonic
5044 name are iterated to find a match to the assembly line. In this data
5045 structure, each such opcode will have one operand_error_record
5046 allocated and inserted. In other words, excessive errors related to
5047 a single opcode are disregarded. */
5048 operand_errors operand_error_report;
5049
5050 /* Free record nodes. */
5051 static operand_error_record *free_opnd_error_record_nodes = NULL;
5052
5053 /* Initialize the data structure that stores the operand mismatch
5054 information on assembling one line of the assembly code. */
5055 static void
5056 init_operand_error_report (void)
5057 {
5058 if (operand_error_report.head != NULL)
5059 {
5060 gas_assert (operand_error_report.tail != NULL);
5061 operand_error_report.tail->next = free_opnd_error_record_nodes;
5062 free_opnd_error_record_nodes = operand_error_report.head;
5063 operand_error_report.head = NULL;
5064 operand_error_report.tail = NULL;
5065 return;
5066 }
5067 gas_assert (operand_error_report.tail == NULL);
5068 }
5069
5070 /* Return TRUE if some operand error has been recorded during the
5071 parsing of the current assembly line using the opcode *OPCODE;
5072 otherwise return FALSE. */
5073 static inline bool
5074 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5075 {
5076 operand_error_record *record = operand_error_report.head;
5077 return record && record->opcode == opcode;
5078 }
5079
5080 /* Add the error record *NEW_RECORD to operand_error_report. The record's
5081 OPCODE field is initialized with OPCODE.
5082 N.B. only one record for each opcode, i.e. at most one error is
5083 recorded for each instruction template. */
5084
5085 static void
5086 add_operand_error_record (const operand_error_record* new_record)
5087 {
5088 const aarch64_opcode *opcode = new_record->opcode;
5089 operand_error_record* record = operand_error_report.head;
5090
5091 /* The record may have been created for this opcode. If not, we need
5092 to prepare one. */
5093 if (! opcode_has_operand_error_p (opcode))
5094 {
5095 /* Get one empty record. */
5096 if (free_opnd_error_record_nodes == NULL)
5097 {
5098 record = XNEW (operand_error_record);
5099 }
5100 else
5101 {
5102 record = free_opnd_error_record_nodes;
5103 free_opnd_error_record_nodes = record->next;
5104 }
5105 record->opcode = opcode;
5106 /* Insert at the head. */
5107 record->next = operand_error_report.head;
5108 operand_error_report.head = record;
5109 if (operand_error_report.tail == NULL)
5110 operand_error_report.tail = record;
5111 }
5112 else if (record->detail.kind != AARCH64_OPDE_NIL
5113 && record->detail.index <= new_record->detail.index
5114 && operand_error_higher_severity_p (record->detail.kind,
5115 new_record->detail.kind))
5116 {
5117 /* In the case of multiple errors found on operands related to a
5118 single opcode, only record the error of the leftmost operand and
5119 only if the error is of higher severity. */
5120 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
5121 " the existing error %s on operand %d",
5122 operand_mismatch_kind_names[new_record->detail.kind],
5123 new_record->detail.index,
5124 operand_mismatch_kind_names[record->detail.kind],
5125 record->detail.index);
5126 return;
5127 }
5128
5129 record->detail = new_record->detail;
5130 }
5131
5132 static inline void
5133 record_operand_error_info (const aarch64_opcode *opcode,
5134 aarch64_operand_error *error_info)
5135 {
5136 operand_error_record record;
5137 record.opcode = opcode;
5138 record.detail = *error_info;
5139 add_operand_error_record (&record);
5140 }
5141
5142 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5143 error message *ERROR, for operand IDX (count from 0). */
5144
5145 static void
5146 record_operand_error (const aarch64_opcode *opcode, int idx,
5147 enum aarch64_operand_error_kind kind,
5148 const char* error)
5149 {
5150 aarch64_operand_error info;
5151 memset(&info, 0, sizeof (info));
5152 info.index = idx;
5153 info.kind = kind;
5154 info.error = error;
5155 info.non_fatal = false;
5156 record_operand_error_info (opcode, &info);
5157 }
5158
5159 static void
5160 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5161 enum aarch64_operand_error_kind kind,
5162 const char* error, const int *extra_data)
5163 {
5164 aarch64_operand_error info;
5165 info.index = idx;
5166 info.kind = kind;
5167 info.error = error;
5168 info.data[0] = extra_data[0];
5169 info.data[1] = extra_data[1];
5170 info.data[2] = extra_data[2];
5171 info.non_fatal = false;
5172 record_operand_error_info (opcode, &info);
5173 }
5174
5175 static void
5176 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5177 const char* error, int lower_bound,
5178 int upper_bound)
5179 {
5180 int data[3] = {lower_bound, upper_bound, 0};
5181 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5182 error, data);
5183 }
5184
5185 /* Remove the operand error record for *OPCODE. */
5186 static void ATTRIBUTE_UNUSED
5187 remove_operand_error_record (const aarch64_opcode *opcode)
5188 {
5189 if (opcode_has_operand_error_p (opcode))
5190 {
5191 operand_error_record* record = operand_error_report.head;
5192 gas_assert (record != NULL && operand_error_report.tail != NULL);
5193 operand_error_report.head = record->next;
5194 record->next = free_opnd_error_record_nodes;
5195 free_opnd_error_record_nodes = record;
5196 if (operand_error_report.head == NULL)
5197 {
5198 gas_assert (operand_error_report.tail == record);
5199 operand_error_report.tail = NULL;
5200 }
5201 }
5202 }
5203
5204 /* Given the instruction in *INSTR, return the index of the best matched
5205 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5206
5207 Return -1 if there is no qualifier sequence; return the first match
5208 if multiple matches are found.  */
5209
5210 static int
5211 find_best_match (const aarch64_inst *instr,
5212 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
5213 {
5214 int i, num_opnds, max_num_matched, idx;
5215
5216 num_opnds = aarch64_num_of_operands (instr->opcode);
5217 if (num_opnds == 0)
5218 {
5219 DEBUG_TRACE ("no operand");
5220 return -1;
5221 }
5222
5223 max_num_matched = 0;
5224 idx = 0;
5225
5226 /* For each pattern. */
5227 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5228 {
5229 int j, num_matched;
5230 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
5231
5232 /* Most opcodes have far fewer patterns in the list. */
5233 if (empty_qualifier_sequence_p (qualifiers))
5234 {
5235 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
5236 break;
5237 }
5238
5239 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
5240 if (*qualifiers == instr->operands[j].qualifier)
5241 ++num_matched;
5242
5243 if (num_matched > max_num_matched)
5244 {
5245 max_num_matched = num_matched;
5246 idx = i;
5247 }
5248 }
5249
5250 DEBUG_TRACE ("return with %d", idx);
5251 return idx;
5252 }
5253
5254 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5255 corresponding operands in *INSTR. */
5256
5257 static inline void
5258 assign_qualifier_sequence (aarch64_inst *instr,
5259 const aarch64_opnd_qualifier_t *qualifiers)
5260 {
5261 int i = 0;
5262 int num_opnds = aarch64_num_of_operands (instr->opcode);
5263 gas_assert (num_opnds);
5264 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5265 instr->operands[i].qualifier = *qualifiers;
5266 }
5267
5268 /* Print operands for diagnostic purposes. */
5269
5270 static void
5271 print_operands (char *buf, const aarch64_opcode *opcode,
5272 const aarch64_opnd_info *opnds)
5273 {
5274 int i;
5275
5276 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
5277 {
5278 char str[128];
5279
5280 /* We rely primarily on the opcode operand info; however, we also look
5281 into inst->operands to support the printing of an optional
5282 operand.
5283 The two operand codes should be the same in all cases, apart from
5284 when the operand can be optional. */
5285 if (opcode->operands[i] == AARCH64_OPND_NIL
5286 || opnds[i].type == AARCH64_OPND_NIL)
5287 break;
5288
5289 /* Generate the operand string in STR. */
5290 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
5291 NULL, cpu_variant);
5292
5293 /* Delimiter. */
5294 if (str[0] != '\0')
5295 strcat (buf, i == 0 ? " " : ", ");
5296
5297 /* Append the operand string. */
5298 strcat (buf, str);
5299 }
5300 }
5301
5302 /* Send to stderr a string as information. */
5303
5304 static void
5305 output_info (const char *format, ...)
5306 {
5307 const char *file;
5308 unsigned int line;
5309 va_list args;
5310
5311 file = as_where (&line);
5312 if (file)
5313 {
5314 if (line != 0)
5315 fprintf (stderr, "%s:%u: ", file, line);
5316 else
5317 fprintf (stderr, "%s: ", file);
5318 }
5319 fprintf (stderr, _("Info: "));
5320 va_start (args, format);
5321 vfprintf (stderr, format, args);
5322 va_end (args);
5323 (void) putc ('\n', stderr);
5324 }
5325
5326 /* Output one operand error record. */
5327
5328 static void
5329 output_operand_error_record (const operand_error_record *record, char *str)
5330 {
5331 const aarch64_operand_error *detail = &record->detail;
5332 int idx = detail->index;
5333 const aarch64_opcode *opcode = record->opcode;
5334 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
5335 : AARCH64_OPND_NIL);
5336
5337 typedef void (*handler_t)(const char *format, ...);
5338 handler_t handler = detail->non_fatal ? as_warn : as_bad;
5339
5340 switch (detail->kind)
5341 {
5342 case AARCH64_OPDE_NIL:
5343 gas_assert (0);
5344 break;
5345 case AARCH64_OPDE_SYNTAX_ERROR:
5346 case AARCH64_OPDE_RECOVERABLE:
5347 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
5348 case AARCH64_OPDE_OTHER_ERROR:
5349 /* Use the prepared error message if there is one, otherwise use the
5350 operand description string to describe the error. */
5351 if (detail->error != NULL)
5352 {
5353 if (idx < 0)
5354 handler (_("%s -- `%s'"), detail->error, str);
5355 else
5356 handler (_("%s at operand %d -- `%s'"),
5357 detail->error, idx + 1, str);
5358 }
5359 else
5360 {
5361 gas_assert (idx >= 0);
5362 handler (_("operand %d must be %s -- `%s'"), idx + 1,
5363 aarch64_get_operand_desc (opd_code), str);
5364 }
5365 break;
5366
5367 case AARCH64_OPDE_INVALID_VARIANT:
5368 handler (_("operand mismatch -- `%s'"), str);
5369 if (verbose_error_p)
5370 {
5371 /* We will try to correct the erroneous instruction and also provide
5372 more information e.g. all other valid variants.
5373
5374 The string representation of the corrected instruction and other
5375 valid variants are generated by
5376
5377 1) obtaining the intermediate representation of the erroneous
5378 instruction;
5379 2) manipulating the IR, e.g. replacing the operand qualifier;
5380 3) printing out the instruction by calling the printer functions
5381 shared with the disassembler.
5382
5383 The limitation of this method is that the exact input assembly
5384 line cannot be accurately reproduced in some cases, for example an
5385 optional operand present in the actual assembly line will be
5386 omitted in the output; likewise for the optional syntax rules,
5387 e.g. the # before the immediate. Another limitation is that the
5388 assembly symbols and relocation operations in the assembly line
5389 currently cannot be printed out in the error report. Last but not
5390 least, when other errors co-exist with this error, the
5391 'corrected' instruction may still be incorrect, e.g. given
5392 'ldnp h0,h1,[x0,#6]!'
5393 this diagnosis will provide the version:
5394 'ldnp s0,s1,[x0,#6]!'
5395 which is still not right. */
5396 size_t len = strlen (get_mnemonic_name (str));
5397 int i, qlf_idx;
5398 bool result;
5399 char buf[2048];
5400 aarch64_inst *inst_base = &inst.base;
5401 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
5402
5403 /* Init inst. */
5404 reset_aarch64_instruction (&inst);
5405 inst_base->opcode = opcode;
5406
5407 /* Reset the error report so that there is no side effect on the
5408 following operand parsing. */
5409 init_operand_error_report ();
5410
5411 /* Fill inst. */
5412 result = parse_operands (str + len, opcode)
5413 && programmer_friendly_fixup (&inst);
5414 gas_assert (result);
5415 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
5416 NULL, NULL, insn_sequence);
5417 gas_assert (!result);
5418
5419 /* Find the most matched qualifier sequence. */
5420 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
5421 gas_assert (qlf_idx > -1);
5422
5423 /* Assign the qualifiers. */
5424 assign_qualifier_sequence (inst_base,
5425 opcode->qualifiers_list[qlf_idx]);
5426
5427 /* Print the hint. */
5428 output_info (_(" did you mean this?"));
5429 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
5430 print_operands (buf, opcode, inst_base->operands);
5431 output_info (_(" %s"), buf);
5432
5433 /* Print out other variant(s) if there is any. */
5434 if (qlf_idx != 0 ||
5435 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
5436 output_info (_(" other valid variant(s):"));
5437
5438 /* For each pattern. */
5439 qualifiers_list = opcode->qualifiers_list;
5440 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5441 {
5442 /* Most opcodes have far fewer patterns in the list.
5443 The first NIL qualifier indicates the end of the list. */
5444 if (empty_qualifier_sequence_p (*qualifiers_list))
5445 break;
5446
5447 if (i != qlf_idx)
5448 {
5449 /* Mnemonics name. */
5450 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
5451
5452 /* Assign the qualifiers. */
5453 assign_qualifier_sequence (inst_base, *qualifiers_list);
5454
5455 /* Print instruction. */
5456 print_operands (buf, opcode, inst_base->operands);
5457
5458 output_info (_(" %s"), buf);
5459 }
5460 }
5461 }
5462 break;
5463
5464 case AARCH64_OPDE_UNTIED_IMMS:
5465 handler (_("operand %d must have the same immediate value "
5466 "as operand 1 -- `%s'"),
5467 detail->index + 1, str);
5468 break;
5469
5470 case AARCH64_OPDE_UNTIED_OPERAND:
5471 handler (_("operand %d must be the same register as operand 1 -- `%s'"),
5472 detail->index + 1, str);
5473 break;
5474
5475 case AARCH64_OPDE_OUT_OF_RANGE:
5476 if (detail->data[0] != detail->data[1])
5477 handler (_("%s out of range %d to %d at operand %d -- `%s'"),
5478 detail->error ? detail->error : _("immediate value"),
5479 detail->data[0], detail->data[1], idx + 1, str);
5480 else
5481 handler (_("%s must be %d at operand %d -- `%s'"),
5482 detail->error ? detail->error : _("immediate value"),
5483 detail->data[0], idx + 1, str);
5484 break;
5485
5486 case AARCH64_OPDE_REG_LIST:
5487 if (detail->data[0] == 1)
5488 handler (_("invalid number of registers in the list; "
5489 "only 1 register is expected at operand %d -- `%s'"),
5490 idx + 1, str);
5491 else
5492 handler (_("invalid number of registers in the list; "
5493 "%d registers are expected at operand %d -- `%s'"),
5494 detail->data[0], idx + 1, str);
5495 break;
5496
5497 case AARCH64_OPDE_UNALIGNED:
5498 handler (_("immediate value must be a multiple of "
5499 "%d at operand %d -- `%s'"),
5500 detail->data[0], idx + 1, str);
5501 break;
5502
5503 default:
5504 gas_assert (0);
5505 break;
5506 }
5507 }
5508
5509 /* Process and output the error message about the operand mismatching.
5510
5511 When this function is called, the operand error information has
5512 been collected for an assembly line and there will be multiple
5513 errors in the case of multiple instruction templates; output the
5514 error message that most closely describes the problem.
5515
5516 The errors to be printed can be filtered, either printing all errors
5517 or only non-fatal errors. This distinction has to be made because
5518 the error buffer may already be filled with fatal errors we don't want to
5519 print due to the different instruction templates. */
5520
5521 static void
5522 output_operand_error_report (char *str, bool non_fatal_only)
5523 {
5524 int largest_error_pos;
5525 const char *msg = NULL;
5526 enum aarch64_operand_error_kind kind;
5527 operand_error_record *curr;
5528 operand_error_record *head = operand_error_report.head;
5529 operand_error_record *record = NULL;
5530
5531 /* No error to report. */
5532 if (head == NULL)
5533 return;
5534
5535 gas_assert (head != NULL && operand_error_report.tail != NULL);
5536
5537 /* Only one error. */
5538 if (head == operand_error_report.tail)
5539 {
5540 /* If the only error is a non-fatal one and we don't want to print it,
5541 just exit. */
5542 if (!non_fatal_only || head->detail.non_fatal)
5543 {
5544 DEBUG_TRACE ("single opcode entry with error kind: %s",
5545 operand_mismatch_kind_names[head->detail.kind]);
5546 output_operand_error_record (head, str);
5547 }
5548 return;
5549 }
5550
5551 /* Find the error kind of the highest severity. */
5552 DEBUG_TRACE ("multiple opcode entries with error kind");
5553 kind = AARCH64_OPDE_NIL;
5554 for (curr = head; curr != NULL; curr = curr->next)
5555 {
5556 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
5557 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
5558 if (operand_error_higher_severity_p (curr->detail.kind, kind)
5559 && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
5560 kind = curr->detail.kind;
5561 }
5562
5563 gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
5564
5565 /* Pick up one of errors of KIND to report. */
5566 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
5567 for (curr = head; curr != NULL; curr = curr->next)
5568 {
5569 /* If we don't want to print non-fatal errors then don't consider them
5570 at all. */
5571 if (curr->detail.kind != kind
5572 || (non_fatal_only && !curr->detail.non_fatal))
5573 continue;
5574 /* If there are multiple errors, pick up the one with the highest
5575 mismatching operand index. In the case of multiple errors with
5576 the equally highest operand index, pick up the first one or the
5577 first one with non-NULL error message. */
5578 if (curr->detail.index > largest_error_pos
5579 || (curr->detail.index == largest_error_pos && msg == NULL
5580 && curr->detail.error != NULL))
5581 {
5582 largest_error_pos = curr->detail.index;
5583 record = curr;
5584 msg = record->detail.error;
5585 }
5586 }
5587
5588 /* The way errors are collected in the back-end is a bit non-intuitive. But
5589 essentially, because each operand template is tried recursively you may
5590 always have errors collected from the previously tried operand. These are
5591 usually skipped if there is one successful match. However, now with the
5592 non-fatal errors we have to ignore those previously collected hard errors
5593 when we're only interested in printing the non-fatal ones. This check
5594 prevents us from printing errors that are not appropriate, since we did
5595 match a template, but that template still has warnings it wants to print. */
5596 if (non_fatal_only && !record)
5597 return;
5598
5599 gas_assert (largest_error_pos != -2 && record != NULL);
5600 DEBUG_TRACE ("Pick up error kind %s to report",
5601 operand_mismatch_kind_names[record->detail.kind]);
5602
5603 /* Output. */
5604 output_operand_error_record (record, str);
5605 }
5606 \f
5607 /* Write an AARCH64 instruction to buf - always little-endian. */
5608 static void
5609 put_aarch64_insn (char *buf, uint32_t insn)
5610 {
5611 unsigned char *where = (unsigned char *) buf;
5612 where[0] = insn;
5613 where[1] = insn >> 8;
5614 where[2] = insn >> 16;
5615 where[3] = insn >> 24;
5616 }
5617
5618 static uint32_t
5619 get_aarch64_insn (char *buf)
5620 {
5621 unsigned char *where = (unsigned char *) buf;
5622 uint32_t result;
5623 result = ((where[0] | (where[1] << 8) | (where[2] << 16)
5624 | ((uint32_t) where[3] << 24)));
5625 return result;
5626 }
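
/* For example, put_aarch64_insn (buf, 0xd503201f) (a NOP) stores the
   bytes 1f 20 03 d5 in that order regardless of host endianness, and
   get_aarch64_insn reads them back as 0xd503201f.  */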
5627
5628 static void
5629 output_inst (struct aarch64_inst *new_inst)
5630 {
5631 char *to = NULL;
5632
5633 to = frag_more (INSN_SIZE);
5634
5635 frag_now->tc_frag_data.recorded = 1;
5636
5637 put_aarch64_insn (to, inst.base.value);
5638
5639 if (inst.reloc.type != BFD_RELOC_UNUSED)
5640 {
5641 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
5642 INSN_SIZE, &inst.reloc.exp,
5643 inst.reloc.pc_rel,
5644 inst.reloc.type);
5645 DEBUG_TRACE ("Prepared relocation fix up");
5646 /* Don't check the addend value against the instruction size,
5647 that's the job of our code in md_apply_fix(). */
5648 fixp->fx_no_overflow = 1;
5649 if (new_inst != NULL)
5650 fixp->tc_fix_data.inst = new_inst;
5651 if (aarch64_gas_internal_fixup_p ())
5652 {
5653 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
5654 fixp->tc_fix_data.opnd = inst.reloc.opnd;
5655 fixp->fx_addnumber = inst.reloc.flags;
5656 }
5657 }
5658
5659 dwarf2_emit_insn (INSN_SIZE);
5660 }
5661
5662 /* Link together opcodes of the same name. */
5663
5664 struct templates
5665 {
5666 const aarch64_opcode *opcode;
5667 struct templates *next;
5668 };
5669
5670 typedef struct templates templates;
5671
5672 static templates *
5673 lookup_mnemonic (const char *start, int len)
5674 {
5675 templates *templ = NULL;
5676
5677 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5678 return templ;
5679 }
5680
5681 /* Subroutine of md_assemble, responsible for looking up the primary
5682 opcode from the mnemonic the user wrote. STR points to the
5683 beginning of the mnemonic. */
5684
5685 static templates *
5686 opcode_lookup (char **str)
5687 {
5688 char *end, *base, *dot;
5689 const aarch64_cond *cond;
5690 char condname[16];
5691 int len;
5692
5693 /* Scan up to the end of the mnemonic, which must end in white space,
5694 '.', or end of string. */
5695 dot = 0;
5696 for (base = end = *str; is_part_of_name(*end); end++)
5697 if (*end == '.' && !dot)
5698 dot = end;
5699
5700 if (end == base || dot == base)
5701 return 0;
5702
5703 inst.cond = COND_ALWAYS;
5704
5705 /* Handle a possible condition. */
5706 if (dot)
5707 {
5708 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5709 if (cond)
5710 {
5711 inst.cond = cond->value;
5712 *str = end;
5713 }
5714 else
5715 {
5716 *str = dot;
5717 return 0;
5718 }
5719 len = dot - base;
5720 }
5721 else
5722 {
5723 *str = end;
5724 len = end - base;
5725 }
5726
5727 if (inst.cond == COND_ALWAYS)
5728 {
5729 /* Look for unaffixed mnemonic. */
5730 return lookup_mnemonic (base, len);
5731 }
5732 else if (len <= 13)
5733 {
5734 /* Append ".c" to the mnemonic if conditional.  */
5735 memcpy (condname, base, len);
5736 memcpy (condname + len, ".c", 2);
5737 base = condname;
5738 len += 2;
5739 return lookup_mnemonic (base, len);
5740 }
5741
5742 return NULL;
5743 }
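
/* For example, for "b.eq" the condition "eq" is found after the dot, so
   the opcode hash is searched for the "b.c" template with inst.cond set
   to the EQ condition value; "fadd" has no dot and is looked up
   directly.  */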
5744
5745 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5746 to a corresponding operand qualifier. */
5747
5748 static inline aarch64_opnd_qualifier_t
5749 vectype_to_qualifier (const struct vector_type_el *vectype)
5750 {
5751 /* Element size in bytes indexed by vector_el_type. */
5752 const unsigned char ele_size[5]
5753 = {1, 2, 4, 8, 16};
5754 const unsigned int ele_base [5] =
5755 {
5756 AARCH64_OPND_QLF_V_4B,
5757 AARCH64_OPND_QLF_V_2H,
5758 AARCH64_OPND_QLF_V_2S,
5759 AARCH64_OPND_QLF_V_1D,
5760 AARCH64_OPND_QLF_V_1Q
5761 };
5762
5763 if (!vectype->defined || vectype->type == NT_invtype)
5764 goto vectype_conversion_fail;
5765
5766 if (vectype->type == NT_zero)
5767 return AARCH64_OPND_QLF_P_Z;
5768 if (vectype->type == NT_merge)
5769 return AARCH64_OPND_QLF_P_M;
5770
5771 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
5772
5773 if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
5774 {
5775 /* Special case S_4B. */
5776 if (vectype->type == NT_b && vectype->width == 4)
5777 return AARCH64_OPND_QLF_S_4B;
5778
5779 /* Special case S_2H. */
5780 if (vectype->type == NT_h && vectype->width == 2)
5781 return AARCH64_OPND_QLF_S_2H;
5782
5783 /* Vector element register. */
5784 return AARCH64_OPND_QLF_S_B + vectype->type;
5785 }
5786 else
5787 {
5788 /* Vector register. */
5789 int reg_size = ele_size[vectype->type] * vectype->width;
5790 unsigned offset;
5791 unsigned shift;
5792 if (reg_size != 16 && reg_size != 8 && reg_size != 4)
5793 goto vectype_conversion_fail;
5794
5795 /* The conversion is done by calculating the offset from the base operand
5796 qualifier for the vector type. The operand qualifiers are regular
5797 enough that the offset can be established by shifting the vector width by
5798 a vector-type dependent amount. */
5799 shift = 0;
5800 if (vectype->type == NT_b)
5801 shift = 3;
5802 else if (vectype->type == NT_h || vectype->type == NT_s)
5803 shift = 2;
5804 else if (vectype->type >= NT_d)
5805 shift = 1;
5806 else
5807 gas_assert (0);
5808
5809 offset = ele_base [vectype->type] + (vectype->width >> shift);
5810 gas_assert (AARCH64_OPND_QLF_V_4B <= offset
5811 && offset <= AARCH64_OPND_QLF_V_1Q);
5812 return offset;
5813 }
5814
5815 vectype_conversion_fail:
5816 first_error (_("bad vector arrangement type"));
5817 return AARCH64_OPND_QLF_NIL;
5818 }
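
/* Worked example of the offset calculation above (assuming the operand
   qualifier enumeration keeps the V_* values consecutive, as the
   gas_assert requires): ".4h" has type NT_h and width 4, so shift = 2
   and the result is ele_base[NT_h] + (4 >> 2), i.e. the qualifier one
   past AARCH64_OPND_QLF_V_2H, which is AARCH64_OPND_QLF_V_4H.  */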
5819
5820 /* Process an optional operand that is omitted from the assembly line.
5821 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5822 instruction's opcode entry while IDX is the index of this omitted operand.
5823 */
5824
5825 static void
5826 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
5827 int idx, aarch64_opnd_info *operand)
5828 {
5829 aarch64_insn default_value = get_optional_operand_default_value (opcode);
5830 gas_assert (optional_operand_p (opcode, idx));
5831 gas_assert (!operand->present);
5832
5833 switch (type)
5834 {
5835 case AARCH64_OPND_Rd:
5836 case AARCH64_OPND_Rn:
5837 case AARCH64_OPND_Rm:
5838 case AARCH64_OPND_Rt:
5839 case AARCH64_OPND_Rt2:
5840 case AARCH64_OPND_Rt_LS64:
5841 case AARCH64_OPND_Rt_SP:
5842 case AARCH64_OPND_Rs:
5843 case AARCH64_OPND_Ra:
5844 case AARCH64_OPND_Rt_SYS:
5845 case AARCH64_OPND_Rd_SP:
5846 case AARCH64_OPND_Rn_SP:
5847 case AARCH64_OPND_Rm_SP:
5848 case AARCH64_OPND_Fd:
5849 case AARCH64_OPND_Fn:
5850 case AARCH64_OPND_Fm:
5851 case AARCH64_OPND_Fa:
5852 case AARCH64_OPND_Ft:
5853 case AARCH64_OPND_Ft2:
5854 case AARCH64_OPND_Sd:
5855 case AARCH64_OPND_Sn:
5856 case AARCH64_OPND_Sm:
5857 case AARCH64_OPND_Va:
5858 case AARCH64_OPND_Vd:
5859 case AARCH64_OPND_Vn:
5860 case AARCH64_OPND_Vm:
5861 case AARCH64_OPND_VdD1:
5862 case AARCH64_OPND_VnD1:
5863 operand->reg.regno = default_value;
5864 break;
5865
5866 case AARCH64_OPND_Ed:
5867 case AARCH64_OPND_En:
5868 case AARCH64_OPND_Em:
5869 case AARCH64_OPND_Em16:
5870 case AARCH64_OPND_SM3_IMM2:
5871 operand->reglane.regno = default_value;
5872 break;
5873
5874 case AARCH64_OPND_IDX:
5875 case AARCH64_OPND_BIT_NUM:
5876 case AARCH64_OPND_IMMR:
5877 case AARCH64_OPND_IMMS:
5878 case AARCH64_OPND_SHLL_IMM:
5879 case AARCH64_OPND_IMM_VLSL:
5880 case AARCH64_OPND_IMM_VLSR:
5881 case AARCH64_OPND_CCMP_IMM:
5882 case AARCH64_OPND_FBITS:
5883 case AARCH64_OPND_UIMM4:
5884 case AARCH64_OPND_UIMM3_OP1:
5885 case AARCH64_OPND_UIMM3_OP2:
5886 case AARCH64_OPND_IMM:
5887 case AARCH64_OPND_IMM_2:
5888 case AARCH64_OPND_WIDTH:
5889 case AARCH64_OPND_UIMM7:
5890 case AARCH64_OPND_NZCV:
5891 case AARCH64_OPND_SVE_PATTERN:
5892 case AARCH64_OPND_SVE_PRFOP:
5893 operand->imm.value = default_value;
5894 break;
5895
5896 case AARCH64_OPND_SVE_PATTERN_SCALED:
5897 operand->imm.value = default_value;
5898 operand->shifter.kind = AARCH64_MOD_MUL;
5899 operand->shifter.amount = 1;
5900 break;
5901
5902 case AARCH64_OPND_EXCEPTION:
5903 inst.reloc.type = BFD_RELOC_UNUSED;
5904 break;
5905
5906 case AARCH64_OPND_BARRIER_ISB:
5907 operand->barrier = aarch64_barrier_options + default_value;
5908 break;
5909
5910 case AARCH64_OPND_BTI_TARGET:
5911 operand->hint_option = aarch64_hint_options + default_value;
5912 break;
5913
5914 default:
5915 break;
5916 }
5917 }
5918
5919 /* Process the relocation type for move wide instructions.
5920 Return TRUE on success; otherwise return FALSE. */
5921
5922 static bool
5923 process_movw_reloc_info (void)
5924 {
5925 int is32;
5926 unsigned shift;
5927
5928 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
5929
5930 if (inst.base.opcode->op == OP_MOVK)
5931 switch (inst.reloc.type)
5932 {
5933 case BFD_RELOC_AARCH64_MOVW_G0_S:
5934 case BFD_RELOC_AARCH64_MOVW_G1_S:
5935 case BFD_RELOC_AARCH64_MOVW_G2_S:
5936 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
5937 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
5938 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
5939 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
5940 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5941 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5942 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5943 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5944 set_syntax_error
5945 (_("the specified relocation type is not allowed for MOVK"));
5946 return false;
5947 default:
5948 break;
5949 }
5950
5951 switch (inst.reloc.type)
5952 {
5953 case BFD_RELOC_AARCH64_MOVW_G0:
5954 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5955 case BFD_RELOC_AARCH64_MOVW_G0_S:
5956 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5957 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
5958 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
5959 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5960 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5961 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5962 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
5963 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5964 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5965 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5966 shift = 0;
5967 break;
5968 case BFD_RELOC_AARCH64_MOVW_G1:
5969 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5970 case BFD_RELOC_AARCH64_MOVW_G1_S:
5971 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5972 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
5973 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
5974 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5975 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5976 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5977 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
5978 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
5979 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5980 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5981 shift = 16;
5982 break;
5983 case BFD_RELOC_AARCH64_MOVW_G2:
5984 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5985 case BFD_RELOC_AARCH64_MOVW_G2_S:
5986 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
5987 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
5988 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
5989 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5990 if (is32)
5991 {
5992 set_fatal_syntax_error
5993 (_("the specified relocation type is not allowed for 32-bit "
5994 "register"));
5995 return false;
5996 }
5997 shift = 32;
5998 break;
5999 case BFD_RELOC_AARCH64_MOVW_G3:
6000 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
6001 if (is32)
6002 {
6003 set_fatal_syntax_error
6004 (_("the specified relocation type is not allowed for 32-bit "
6005 "register"));
6006 return false;
6007 }
6008 shift = 48;
6009 break;
6010 default:
6011 /* More cases should be added when more MOVW-related relocation types
6012 are supported in GAS. */
6013 gas_assert (aarch64_gas_internal_fixup_p ());
6014 /* The shift amount should have already been set by the parser. */
6015 return true;
6016 }
6017 inst.base.operands[1].shifter.amount = shift;
6018 return true;
6019 }
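/* For example, "movz x0, #:abs_g1:sym" carries BFD_RELOC_AARCH64_MOVW_G1,
   so the switch above records a shift of 16; the same relocation on a
   32-bit (W) destination is also accepted, whereas the G2/G3 groups are
   rejected for W registers.  */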
6020
6021 /* A primitive log calculator. */
6022
6023 static inline unsigned int
6024 get_logsz (unsigned int size)
6025 {
6026 const unsigned char ls[16] =
6027 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
6028 if (size > 16)
6029 {
6030 gas_assert (0);
6031 return -1;
6032 }
6033 gas_assert (ls[size - 1] != (unsigned char)-1);
6034 return ls[size - 1];
6035 }
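/* For example, get_logsz (1) == 0, get_logsz (4) == 2 and get_logsz (16) == 4;
   sizes that are not powers of two trip the assertion.  */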
6036
6037 /* Determine and return the real reloc type code for an instruction
6038 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
6039
6040 static inline bfd_reloc_code_real_type
6041 ldst_lo12_determine_real_reloc_type (void)
6042 {
6043 unsigned logsz, max_logsz;
6044 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
6045 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
6046
6047 const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
6048 {
6049 BFD_RELOC_AARCH64_LDST8_LO12,
6050 BFD_RELOC_AARCH64_LDST16_LO12,
6051 BFD_RELOC_AARCH64_LDST32_LO12,
6052 BFD_RELOC_AARCH64_LDST64_LO12,
6053 BFD_RELOC_AARCH64_LDST128_LO12
6054 },
6055 {
6056 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
6057 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
6058 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
6059 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
6060 BFD_RELOC_AARCH64_NONE
6061 },
6062 {
6063 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
6064 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
6065 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
6066 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
6067 BFD_RELOC_AARCH64_NONE
6068 },
6069 {
6070 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
6071 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
6072 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
6073 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
6074 BFD_RELOC_AARCH64_NONE
6075 },
6076 {
6077 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
6078 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
6079 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
6080 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
6081 BFD_RELOC_AARCH64_NONE
6082 }
6083 };
6084
6085 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6086 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
6087 || (inst.reloc.type
6088 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6089 || (inst.reloc.type
6090 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6091 || (inst.reloc.type
6092 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
6093 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
6094
6095 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
6096 opd1_qlf =
6097 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
6098 1, opd0_qlf, 0);
6099 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
6100
6101 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
6102
6103 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
6104 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
6105 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
6106 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
6107 max_logsz = 3;
6108 else
6109 max_logsz = 4;
6110
6111 if (logsz > max_logsz)
6112 {
6113 /* See PR 27904 for an example of this. */
6114 set_fatal_syntax_error
6115 (_("relocation qualifier does not match instruction size"));
6116 return BFD_RELOC_AARCH64_NONE;
6117 }
6118
6119 /* In reloc.c, these pseudo relocation types should be defined in the same
6120 order as in the reloc_ldst_lo12 array above, because the array index
6121 calculation below relies on this. */
6122 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
6123 }
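/* For example, "ldr x0, [x1, #:lo12:sym]" starts out with the pseudo type
   BFD_RELOC_AARCH64_LDST_LO12; the X qualifier has an element size of 8
   bytes (logsz 3), so the table above selects BFD_RELOC_AARCH64_LDST64_LO12.  */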
6124
6125 /* Check whether a register list REGINFO is valid. The registers must be
6126 numbered in increasing order (modulo 32), in increments of one or two.
6127
6128 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
6129 increments of two.
6130
6131 Return FALSE if such a register list is invalid, otherwise return TRUE. */
6132
6133 static bool
6134 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
6135 {
6136 uint32_t i, nb_regs, prev_regno, incr;
6137
6138 nb_regs = 1 + (reginfo & 0x3);
6139 reginfo >>= 2;
6140 prev_regno = reginfo & 0x1f;
6141 incr = accept_alternate ? 2 : 1;
6142
6143 for (i = 1; i < nb_regs; ++i)
6144 {
6145 uint32_t curr_regno;
6146 reginfo >>= 5;
6147 curr_regno = reginfo & 0x1f;
6148 if (curr_regno != ((prev_regno + incr) & 0x1f))
6149 return false;
6150 prev_regno = curr_regno;
6151 }
6152
6153 return true;
6154 }
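/* The numbering check is modulo 32, so a wrapping list such as
   { v31.4s, v0.4s, v1.4s } is accepted; a list like { v0.4s, v2.4s } is
   only valid when ACCEPT_ALTERNATE requests increments of two.  */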
6155
6156 /* Generic instruction operand parser. This does no encoding and no
6157 semantic validation; it merely squirrels values away in the inst
6158 structure. Returns TRUE or FALSE depending on whether the
6159 specified grammar matched. */
6160
6161 static bool
6162 parse_operands (char *str, const aarch64_opcode *opcode)
6163 {
6164 int i;
6165 char *backtrack_pos = 0;
6166 const enum aarch64_opnd *operands = opcode->operands;
6167 aarch64_reg_type imm_reg_type;
6168
6169 clear_error ();
6170 skip_whitespace (str);
6171
6172 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
6173 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6174 else
6175 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6176
6177 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6178 {
6179 int64_t val;
6180 const reg_entry *reg;
6181 int comma_skipped_p = 0;
6182 aarch64_reg_type rtype;
6183 struct vector_type_el vectype;
6184 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6185 aarch64_opnd_info *info = &inst.base.operands[i];
6186 aarch64_reg_type reg_type;
6187
6188 DEBUG_TRACE ("parse operand %d", i);
6189
6190 /* Assign the operand code. */
6191 info->type = operands[i];
6192
6193 if (optional_operand_p (opcode, i))
6194 {
6195 /* Remember where we are in case we need to backtrack. */
6196 gas_assert (!backtrack_pos);
6197 backtrack_pos = str;
6198 }
6199
6200 /* Expect comma between operands; the backtrack mechanism will take
6201 care of the case of an omitted optional operand. */
6202 if (i > 0 && ! skip_past_char (&str, ','))
6203 {
6204 set_syntax_error (_("comma expected between operands"));
6205 goto failure;
6206 }
6207 else
6208 comma_skipped_p = 1;
6209
6210 switch (operands[i])
6211 {
6212 case AARCH64_OPND_Rd:
6213 case AARCH64_OPND_Rn:
6214 case AARCH64_OPND_Rm:
6215 case AARCH64_OPND_Rt:
6216 case AARCH64_OPND_Rt2:
6217 case AARCH64_OPND_Rs:
6218 case AARCH64_OPND_Ra:
6219 case AARCH64_OPND_Rt_LS64:
6220 case AARCH64_OPND_Rt_SYS:
6221 case AARCH64_OPND_PAIRREG:
6222 case AARCH64_OPND_SVE_Rm:
6223 po_int_reg_or_fail (REG_TYPE_R_Z);
6224
6225 /* In LS64 load/store instructions the Rt register number must be even
6226 and <= 22. */
6227 if (operands[i] == AARCH64_OPND_Rt_LS64)
6228 {
6229 /* We've already checked that this is a valid register.
6230 This checks that the register number (Rt) is not undefined for LS64
6231 instructions:
6232 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6233 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6234 {
6235 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6236 goto failure;
6237 }
6238 }
6239 break;
6240
6241 case AARCH64_OPND_Rd_SP:
6242 case AARCH64_OPND_Rn_SP:
6243 case AARCH64_OPND_Rt_SP:
6244 case AARCH64_OPND_SVE_Rn_SP:
6245 case AARCH64_OPND_Rm_SP:
6246 po_int_reg_or_fail (REG_TYPE_R_SP);
6247 break;
6248
6249 case AARCH64_OPND_Rm_EXT:
6250 case AARCH64_OPND_Rm_SFT:
6251 po_misc_or_fail (parse_shifter_operand
6252 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6253 ? SHIFTED_ARITH_IMM
6254 : SHIFTED_LOGIC_IMM)));
6255 if (!info->shifter.operator_present)
6256 {
6257 /* Default to LSL if not present. Libopcodes prefers shifter
6258 kind to be explicit. */
6259 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6260 info->shifter.kind = AARCH64_MOD_LSL;
6261 /* For Rm_EXT, libopcodes will carry out further check on whether
6262 or not stack pointer is used in the instruction (Recall that
6263 "the extend operator is not optional unless at least one of
6264 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6265 }
6266 break;
6267
6268 case AARCH64_OPND_Fd:
6269 case AARCH64_OPND_Fn:
6270 case AARCH64_OPND_Fm:
6271 case AARCH64_OPND_Fa:
6272 case AARCH64_OPND_Ft:
6273 case AARCH64_OPND_Ft2:
6274 case AARCH64_OPND_Sd:
6275 case AARCH64_OPND_Sn:
6276 case AARCH64_OPND_Sm:
6277 case AARCH64_OPND_SVE_VZn:
6278 case AARCH64_OPND_SVE_Vd:
6279 case AARCH64_OPND_SVE_Vm:
6280 case AARCH64_OPND_SVE_Vn:
6281 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
6282 if (val == PARSE_FAIL)
6283 {
6284 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6285 goto failure;
6286 }
6287 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
6288
6289 info->reg.regno = val;
6290 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
6291 break;
6292
6293 case AARCH64_OPND_SVE_Pd:
6294 case AARCH64_OPND_SVE_Pg3:
6295 case AARCH64_OPND_SVE_Pg4_5:
6296 case AARCH64_OPND_SVE_Pg4_10:
6297 case AARCH64_OPND_SVE_Pg4_16:
6298 case AARCH64_OPND_SVE_Pm:
6299 case AARCH64_OPND_SVE_Pn:
6300 case AARCH64_OPND_SVE_Pt:
6301 case AARCH64_OPND_SME_Pm:
6302 reg_type = REG_TYPE_PN;
6303 goto vector_reg;
6304
6305 case AARCH64_OPND_SVE_Za_5:
6306 case AARCH64_OPND_SVE_Za_16:
6307 case AARCH64_OPND_SVE_Zd:
6308 case AARCH64_OPND_SVE_Zm_5:
6309 case AARCH64_OPND_SVE_Zm_16:
6310 case AARCH64_OPND_SVE_Zn:
6311 case AARCH64_OPND_SVE_Zt:
6312 reg_type = REG_TYPE_ZN;
6313 goto vector_reg;
6314
6315 case AARCH64_OPND_Va:
6316 case AARCH64_OPND_Vd:
6317 case AARCH64_OPND_Vn:
6318 case AARCH64_OPND_Vm:
6319 reg_type = REG_TYPE_VN;
6320 vector_reg:
6321 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6322 if (val == PARSE_FAIL)
6323 {
6324 first_error (_(get_reg_expected_msg (reg_type)));
6325 goto failure;
6326 }
6327 if (vectype.defined & NTA_HASINDEX)
6328 goto failure;
6329
6330 info->reg.regno = val;
6331 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6332 && vectype.type == NT_invtype)
6333 /* Unqualified Pn and Zn registers are allowed in certain
6334 contexts. Rely on F_STRICT qualifier checking to catch
6335 invalid uses. */
6336 info->qualifier = AARCH64_OPND_QLF_NIL;
6337 else
6338 {
6339 info->qualifier = vectype_to_qualifier (&vectype);
6340 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6341 goto failure;
6342 }
6343 break;
6344
6345 case AARCH64_OPND_VdD1:
6346 case AARCH64_OPND_VnD1:
6347 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
6348 if (val == PARSE_FAIL)
6349 {
6350 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6351 goto failure;
6352 }
6353 if (vectype.type != NT_d || vectype.index != 1)
6354 {
6355 set_fatal_syntax_error
6356 (_("the top half of a 128-bit FP/SIMD register is expected"));
6357 goto failure;
6358 }
6359 info->reg.regno = val;
6360 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6361 here; it is correct for the purpose of encoding/decoding since
6362 only the register number is explicitly encoded in the related
6363 instructions, although this appears a bit hacky. */
6364 info->qualifier = AARCH64_OPND_QLF_S_D;
6365 break;
6366
6367 case AARCH64_OPND_SVE_Zm3_INDEX:
6368 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6369 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6370 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6371 case AARCH64_OPND_SVE_Zm4_INDEX:
6372 case AARCH64_OPND_SVE_Zn_INDEX:
6373 reg_type = REG_TYPE_ZN;
6374 goto vector_reg_index;
6375
6376 case AARCH64_OPND_Ed:
6377 case AARCH64_OPND_En:
6378 case AARCH64_OPND_Em:
6379 case AARCH64_OPND_Em16:
6380 case AARCH64_OPND_SM3_IMM2:
6381 reg_type = REG_TYPE_VN;
6382 vector_reg_index:
6383 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6384 if (val == PARSE_FAIL)
6385 {
6386 first_error (_(get_reg_expected_msg (reg_type)));
6387 goto failure;
6388 }
6389 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6390 goto failure;
6391
6392 info->reglane.regno = val;
6393 info->reglane.index = vectype.index;
6394 info->qualifier = vectype_to_qualifier (&vectype);
6395 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6396 goto failure;
6397 break;
6398
6399 case AARCH64_OPND_SVE_ZnxN:
6400 case AARCH64_OPND_SVE_ZtxN:
6401 reg_type = REG_TYPE_ZN;
6402 goto vector_reg_list;
6403
6404 case AARCH64_OPND_LVn:
6405 case AARCH64_OPND_LVt:
6406 case AARCH64_OPND_LVt_AL:
6407 case AARCH64_OPND_LEt:
6408 reg_type = REG_TYPE_VN;
6409 vector_reg_list:
6410 if (reg_type == REG_TYPE_ZN
6411 && get_opcode_dependent_value (opcode) == 1
6412 && *str != '{')
6413 {
6414 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6415 if (val == PARSE_FAIL)
6416 {
6417 first_error (_(get_reg_expected_msg (reg_type)));
6418 goto failure;
6419 }
6420 info->reglist.first_regno = val;
6421 info->reglist.num_regs = 1;
6422 }
6423 else
6424 {
6425 val = parse_vector_reg_list (&str, reg_type, &vectype);
6426 if (val == PARSE_FAIL)
6427 goto failure;
6428
6429 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6430 {
6431 set_fatal_syntax_error (_("invalid register list"));
6432 goto failure;
6433 }
6434
6435 if (vectype.width != 0 && *str != ',')
6436 {
6437 set_fatal_syntax_error
6438 (_("expected element type rather than vector type"));
6439 goto failure;
6440 }
6441
6442 info->reglist.first_regno = (val >> 2) & 0x1f;
6443 info->reglist.num_regs = (val & 0x3) + 1;
6444 }
6445 if (operands[i] == AARCH64_OPND_LEt)
6446 {
6447 if (!(vectype.defined & NTA_HASINDEX))
6448 goto failure;
6449 info->reglist.has_index = 1;
6450 info->reglist.index = vectype.index;
6451 }
6452 else
6453 {
6454 if (vectype.defined & NTA_HASINDEX)
6455 goto failure;
6456 if (!(vectype.defined & NTA_HASTYPE))
6457 {
6458 if (reg_type == REG_TYPE_ZN)
6459 set_fatal_syntax_error (_("missing type suffix"));
6460 goto failure;
6461 }
6462 }
6463 info->qualifier = vectype_to_qualifier (&vectype);
6464 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6465 goto failure;
6466 break;
6467
6468 case AARCH64_OPND_CRn:
6469 case AARCH64_OPND_CRm:
6470 {
6471 char prefix = *(str++);
6472 if (prefix != 'c' && prefix != 'C')
6473 goto failure;
6474
6475 po_imm_nc_or_fail ();
6476 if (val > 15)
6477 {
6478 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6479 goto failure;
6480 }
6481 info->qualifier = AARCH64_OPND_QLF_CR;
6482 info->imm.value = val;
6483 break;
6484 }
6485
6486 case AARCH64_OPND_SHLL_IMM:
6487 case AARCH64_OPND_IMM_VLSR:
6488 po_imm_or_fail (1, 64);
6489 info->imm.value = val;
6490 break;
6491
6492 case AARCH64_OPND_CCMP_IMM:
6493 case AARCH64_OPND_SIMM5:
6494 case AARCH64_OPND_FBITS:
6495 case AARCH64_OPND_TME_UIMM16:
6496 case AARCH64_OPND_UIMM4:
6497 case AARCH64_OPND_UIMM4_ADDG:
6498 case AARCH64_OPND_UIMM10:
6499 case AARCH64_OPND_UIMM3_OP1:
6500 case AARCH64_OPND_UIMM3_OP2:
6501 case AARCH64_OPND_IMM_VLSL:
6502 case AARCH64_OPND_IMM:
6503 case AARCH64_OPND_IMM_2:
6504 case AARCH64_OPND_WIDTH:
6505 case AARCH64_OPND_SVE_INV_LIMM:
6506 case AARCH64_OPND_SVE_LIMM:
6507 case AARCH64_OPND_SVE_LIMM_MOV:
6508 case AARCH64_OPND_SVE_SHLIMM_PRED:
6509 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6510 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6511 case AARCH64_OPND_SVE_SHRIMM_PRED:
6512 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6513 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6514 case AARCH64_OPND_SVE_SIMM5:
6515 case AARCH64_OPND_SVE_SIMM5B:
6516 case AARCH64_OPND_SVE_SIMM6:
6517 case AARCH64_OPND_SVE_SIMM8:
6518 case AARCH64_OPND_SVE_UIMM3:
6519 case AARCH64_OPND_SVE_UIMM7:
6520 case AARCH64_OPND_SVE_UIMM8:
6521 case AARCH64_OPND_SVE_UIMM8_53:
6522 case AARCH64_OPND_IMM_ROT1:
6523 case AARCH64_OPND_IMM_ROT2:
6524 case AARCH64_OPND_IMM_ROT3:
6525 case AARCH64_OPND_SVE_IMM_ROT1:
6526 case AARCH64_OPND_SVE_IMM_ROT2:
6527 case AARCH64_OPND_SVE_IMM_ROT3:
6528 po_imm_nc_or_fail ();
6529 info->imm.value = val;
6530 break;
6531
6532 case AARCH64_OPND_SVE_AIMM:
6533 case AARCH64_OPND_SVE_ASIMM:
6534 po_imm_nc_or_fail ();
6535 info->imm.value = val;
6536 skip_whitespace (str);
6537 if (skip_past_comma (&str))
6538 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6539 else
6540 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6541 break;
6542
6543 case AARCH64_OPND_SVE_PATTERN:
6544 po_enum_or_fail (aarch64_sve_pattern_array);
6545 info->imm.value = val;
6546 break;
6547
6548 case AARCH64_OPND_SVE_PATTERN_SCALED:
6549 po_enum_or_fail (aarch64_sve_pattern_array);
6550 info->imm.value = val;
6551 if (skip_past_comma (&str)
6552 && !parse_shift (&str, info, SHIFTED_MUL))
6553 goto failure;
6554 if (!info->shifter.operator_present)
6555 {
6556 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6557 info->shifter.kind = AARCH64_MOD_MUL;
6558 info->shifter.amount = 1;
6559 }
6560 break;
6561
6562 case AARCH64_OPND_SVE_PRFOP:
6563 po_enum_or_fail (aarch64_sve_prfop_array);
6564 info->imm.value = val;
6565 break;
6566
6567 case AARCH64_OPND_UIMM7:
6568 po_imm_or_fail (0, 127);
6569 info->imm.value = val;
6570 break;
6571
6572 case AARCH64_OPND_IDX:
6573 case AARCH64_OPND_MASK:
6574 case AARCH64_OPND_BIT_NUM:
6575 case AARCH64_OPND_IMMR:
6576 case AARCH64_OPND_IMMS:
6577 po_imm_or_fail (0, 63);
6578 info->imm.value = val;
6579 break;
6580
6581 case AARCH64_OPND_IMM0:
6582 po_imm_nc_or_fail ();
6583 if (val != 0)
6584 {
6585 set_fatal_syntax_error (_("immediate zero expected"));
6586 goto failure;
6587 }
6588 info->imm.value = 0;
6589 break;
6590
6591 case AARCH64_OPND_FPIMM0:
6592 {
6593 int qfloat;
6594 bool res1 = false, res2 = false;
6595 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6596 it is probably not worth the effort to support it. */
6597 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6598 imm_reg_type))
6599 && (error_p ()
6600 || !(res2 = parse_constant_immediate (&str, &val,
6601 imm_reg_type))))
6602 goto failure;
6603 if ((res1 && qfloat == 0) || (res2 && val == 0))
6604 {
6605 info->imm.value = 0;
6606 info->imm.is_fp = 1;
6607 break;
6608 }
6609 set_fatal_syntax_error (_("immediate zero expected"));
6610 goto failure;
6611 }
6612
6613 case AARCH64_OPND_IMM_MOV:
6614 {
6615 char *saved = str;
6616 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6617 reg_name_p (str, REG_TYPE_VN))
6618 goto failure;
6619 str = saved;
6620 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6621 GE_OPT_PREFIX, REJECT_ABSENT,
6622 NORMAL_RESOLUTION));
6623 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6624 later. fix_mov_imm_insn will try to determine a machine
6625 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6626 message if the immediate cannot be moved by a single
6627 instruction. */
6628 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6629 inst.base.operands[i].skip = 1;
6630 }
6631 break;
6632
6633 case AARCH64_OPND_SIMD_IMM:
6634 case AARCH64_OPND_SIMD_IMM_SFT:
6635 if (! parse_big_immediate (&str, &val, imm_reg_type))
6636 goto failure;
6637 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6638 /* addr_off_p */ 0,
6639 /* need_libopcodes_p */ 1,
6640 /* skip_p */ 1);
6641 /* Parse shift.
6642 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6643 shift, we don't check it here; we leave the checking to
6644 the libopcodes (operand_general_constraint_met_p). By
6645 doing this, we achieve better diagnostics. */
6646 if (skip_past_comma (&str)
6647 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6648 goto failure;
6649 if (!info->shifter.operator_present
6650 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6651 {
6652 /* Default to LSL if not present. Libopcodes prefers shifter
6653 kind to be explicit. */
6654 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6655 info->shifter.kind = AARCH64_MOD_LSL;
6656 }
6657 break;
6658
6659 case AARCH64_OPND_FPIMM:
6660 case AARCH64_OPND_SIMD_FPIMM:
6661 case AARCH64_OPND_SVE_FPIMM8:
6662 {
6663 int qfloat;
6664 bool dp_p;
6665
6666 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6667 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6668 || !aarch64_imm_float_p (qfloat))
6669 {
6670 if (!error_p ())
6671 set_fatal_syntax_error (_("invalid floating-point"
6672 " constant"));
6673 goto failure;
6674 }
6675 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6676 inst.base.operands[i].imm.is_fp = 1;
6677 }
6678 break;
6679
6680 case AARCH64_OPND_SVE_I1_HALF_ONE:
6681 case AARCH64_OPND_SVE_I1_HALF_TWO:
6682 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6683 {
6684 int qfloat;
6685 bool dp_p;
6686
6687 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6688 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6689 {
6690 if (!error_p ())
6691 set_fatal_syntax_error (_("invalid floating-point"
6692 " constant"));
6693 goto failure;
6694 }
6695 inst.base.operands[i].imm.value = qfloat;
6696 inst.base.operands[i].imm.is_fp = 1;
6697 }
6698 break;
6699
6700 case AARCH64_OPND_LIMM:
6701 po_misc_or_fail (parse_shifter_operand (&str, info,
6702 SHIFTED_LOGIC_IMM));
6703 if (info->shifter.operator_present)
6704 {
6705 set_fatal_syntax_error
6706 (_("shift not allowed for bitmask immediate"));
6707 goto failure;
6708 }
6709 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6710 /* addr_off_p */ 0,
6711 /* need_libopcodes_p */ 1,
6712 /* skip_p */ 1);
6713 break;
6714
6715 case AARCH64_OPND_AIMM:
6716 if (opcode->op == OP_ADD)
6717 /* ADD may have relocation types. */
6718 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6719 SHIFTED_ARITH_IMM));
6720 else
6721 po_misc_or_fail (parse_shifter_operand (&str, info,
6722 SHIFTED_ARITH_IMM));
6723 switch (inst.reloc.type)
6724 {
6725 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6726 info->shifter.amount = 12;
6727 break;
6728 case BFD_RELOC_UNUSED:
6729 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6730 if (info->shifter.kind != AARCH64_MOD_NONE)
6731 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6732 inst.reloc.pc_rel = 0;
6733 break;
6734 default:
6735 break;
6736 }
6737 info->imm.value = 0;
6738 if (!info->shifter.operator_present)
6739 {
6740 /* Default to LSL if not present. Libopcodes prefers shifter
6741 kind to be explicit. */
6742 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6743 info->shifter.kind = AARCH64_MOD_LSL;
6744 }
6745 break;
6746
6747 case AARCH64_OPND_HALF:
6748 {
6749 /* #<imm16> or relocation. */
6750 int internal_fixup_p;
6751 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6752 if (internal_fixup_p)
6753 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6754 skip_whitespace (str);
6755 if (skip_past_comma (&str))
6756 {
6757 /* {, LSL #<shift>} */
6758 if (! aarch64_gas_internal_fixup_p ())
6759 {
6760 set_fatal_syntax_error (_("can't mix relocation modifier "
6761 "with explicit shift"));
6762 goto failure;
6763 }
6764 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6765 }
6766 else
6767 inst.base.operands[i].shifter.amount = 0;
6768 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6769 inst.base.operands[i].imm.value = 0;
6770 if (! process_movw_reloc_info ())
6771 goto failure;
6772 }
6773 break;
6774
6775 case AARCH64_OPND_EXCEPTION:
6776 case AARCH64_OPND_UNDEFINED:
6777 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6778 imm_reg_type));
6779 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6780 /* addr_off_p */ 0,
6781 /* need_libopcodes_p */ 0,
6782 /* skip_p */ 1);
6783 break;
6784
6785 case AARCH64_OPND_NZCV:
6786 {
6787 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6788 if (nzcv != NULL)
6789 {
6790 str += 4;
6791 info->imm.value = nzcv->value;
6792 break;
6793 }
6794 po_imm_or_fail (0, 15);
6795 info->imm.value = val;
6796 }
6797 break;
6798
6799 case AARCH64_OPND_COND:
6800 case AARCH64_OPND_COND1:
6801 {
6802 char *start = str;
6803 do
6804 str++;
6805 while (ISALPHA (*str));
6806 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6807 if (info->cond == NULL)
6808 {
6809 set_syntax_error (_("invalid condition"));
6810 goto failure;
6811 }
6812 else if (operands[i] == AARCH64_OPND_COND1
6813 && (info->cond->value & 0xe) == 0xe)
6814 {
6815 /* Do not allow AL or NV. */
6816 set_default_error ();
6817 goto failure;
6818 }
6819 }
6820 break;
6821
6822 case AARCH64_OPND_ADDR_ADRP:
6823 po_misc_or_fail (parse_adrp (&str));
6824 /* Clear the value as operand needs to be relocated. */
6825 info->imm.value = 0;
6826 break;
6827
6828 case AARCH64_OPND_ADDR_PCREL14:
6829 case AARCH64_OPND_ADDR_PCREL19:
6830 case AARCH64_OPND_ADDR_PCREL21:
6831 case AARCH64_OPND_ADDR_PCREL26:
6832 po_misc_or_fail (parse_address (&str, info));
6833 if (!info->addr.pcrel)
6834 {
6835 set_syntax_error (_("invalid pc-relative address"));
6836 goto failure;
6837 }
6838 if (inst.gen_lit_pool
6839 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6840 {
6841 /* Only permit "=value" in the literal load instructions.
6842 The literal will be generated by programmer_friendly_fixup. */
6843 set_syntax_error (_("invalid use of \"=immediate\""));
6844 goto failure;
6845 }
6846 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6847 {
6848 set_syntax_error (_("unrecognized relocation suffix"));
6849 goto failure;
6850 }
6851 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6852 {
6853 info->imm.value = inst.reloc.exp.X_add_number;
6854 inst.reloc.type = BFD_RELOC_UNUSED;
6855 }
6856 else
6857 {
6858 info->imm.value = 0;
6859 if (inst.reloc.type == BFD_RELOC_UNUSED)
6860 switch (opcode->iclass)
6861 {
6862 case compbranch:
6863 case condbranch:
6864 /* e.g. CBZ or B.COND */
6865 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6866 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6867 break;
6868 case testbranch:
6869 /* e.g. TBZ */
6870 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6871 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6872 break;
6873 case branch_imm:
6874 /* e.g. B or BL */
6875 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6876 inst.reloc.type =
6877 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6878 : BFD_RELOC_AARCH64_JUMP26;
6879 break;
6880 case loadlit:
6881 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6882 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6883 break;
6884 case pcreladdr:
6885 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6886 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6887 break;
6888 default:
6889 gas_assert (0);
6890 abort ();
6891 }
6892 inst.reloc.pc_rel = 1;
6893 }
6894 break;
6895
6896 case AARCH64_OPND_ADDR_SIMPLE:
6897 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6898 {
6899 /* [<Xn|SP>{, #<simm>}] */
6900 char *start = str;
6901 /* First use the normal address-parsing routines, to get
6902 the usual syntax errors. */
6903 po_misc_or_fail (parse_address (&str, info));
6904 if (info->addr.pcrel || info->addr.offset.is_reg
6905 || !info->addr.preind || info->addr.postind
6906 || info->addr.writeback)
6907 {
6908 set_syntax_error (_("invalid addressing mode"));
6909 goto failure;
6910 }
6911
6912 /* Then retry, matching the specific syntax of these addresses. */
6913 str = start;
6914 po_char_or_fail ('[');
6915 po_reg_or_fail (REG_TYPE_R64_SP);
6916 /* Accept optional ", #0". */
6917 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6918 && skip_past_char (&str, ','))
6919 {
6920 skip_past_char (&str, '#');
6921 if (! skip_past_char (&str, '0'))
6922 {
6923 set_fatal_syntax_error
6924 (_("the optional immediate offset can only be 0"));
6925 goto failure;
6926 }
6927 }
6928 po_char_or_fail (']');
6929 break;
6930 }
6931
6932 case AARCH64_OPND_ADDR_REGOFF:
6933 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6934 po_misc_or_fail (parse_address (&str, info));
6935 regoff_addr:
6936 if (info->addr.pcrel || !info->addr.offset.is_reg
6937 || !info->addr.preind || info->addr.postind
6938 || info->addr.writeback)
6939 {
6940 set_syntax_error (_("invalid addressing mode"));
6941 goto failure;
6942 }
6943 if (!info->shifter.operator_present)
6944 {
6945 /* Default to LSL if not present. Libopcodes prefers shifter
6946 kind to be explicit. */
6947 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6948 info->shifter.kind = AARCH64_MOD_LSL;
6949 }
6950 /* Qualifier to be deduced by libopcodes. */
6951 break;
6952
6953 case AARCH64_OPND_ADDR_SIMM7:
6954 po_misc_or_fail (parse_address (&str, info));
6955 if (info->addr.pcrel || info->addr.offset.is_reg
6956 || (!info->addr.preind && !info->addr.postind))
6957 {
6958 set_syntax_error (_("invalid addressing mode"));
6959 goto failure;
6960 }
6961 if (inst.reloc.type != BFD_RELOC_UNUSED)
6962 {
6963 set_syntax_error (_("relocation not allowed"));
6964 goto failure;
6965 }
6966 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6967 /* addr_off_p */ 1,
6968 /* need_libopcodes_p */ 1,
6969 /* skip_p */ 0);
6970 break;
6971
6972 case AARCH64_OPND_ADDR_SIMM9:
6973 case AARCH64_OPND_ADDR_SIMM9_2:
6974 case AARCH64_OPND_ADDR_SIMM11:
6975 case AARCH64_OPND_ADDR_SIMM13:
6976 po_misc_or_fail (parse_address (&str, info));
6977 if (info->addr.pcrel || info->addr.offset.is_reg
6978 || (!info->addr.preind && !info->addr.postind)
6979 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6980 && info->addr.writeback))
6981 {
6982 set_syntax_error (_("invalid addressing mode"));
6983 goto failure;
6984 }
6985 if (inst.reloc.type != BFD_RELOC_UNUSED)
6986 {
6987 set_syntax_error (_("relocation not allowed"));
6988 goto failure;
6989 }
6990 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6991 /* addr_off_p */ 1,
6992 /* need_libopcodes_p */ 1,
6993 /* skip_p */ 0);
6994 break;
6995
6996 case AARCH64_OPND_ADDR_SIMM10:
6997 case AARCH64_OPND_ADDR_OFFSET:
6998 po_misc_or_fail (parse_address (&str, info));
6999 if (info->addr.pcrel || info->addr.offset.is_reg
7000 || !info->addr.preind || info->addr.postind)
7001 {
7002 set_syntax_error (_("invalid addressing mode"));
7003 goto failure;
7004 }
7005 if (inst.reloc.type != BFD_RELOC_UNUSED)
7006 {
7007 set_syntax_error (_("relocation not allowed"));
7008 goto failure;
7009 }
7010 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7011 /* addr_off_p */ 1,
7012 /* need_libopcodes_p */ 1,
7013 /* skip_p */ 0);
7014 break;
7015
7016 case AARCH64_OPND_ADDR_UIMM12:
7017 po_misc_or_fail (parse_address (&str, info));
7018 if (info->addr.pcrel || info->addr.offset.is_reg
7019 || !info->addr.preind || info->addr.writeback)
7020 {
7021 set_syntax_error (_("invalid addressing mode"));
7022 goto failure;
7023 }
7024 if (inst.reloc.type == BFD_RELOC_UNUSED)
7025 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7026 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7027 || (inst.reloc.type
7028 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7029 || (inst.reloc.type
7030 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7031 || (inst.reloc.type
7032 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7033 || (inst.reloc.type
7034 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7035 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7036 /* Leave qualifier to be determined by libopcodes. */
7037 break;
7038
7039 case AARCH64_OPND_SIMD_ADDR_POST:
7040 /* [<Xn|SP>], <Xm|#<amount>> */
7041 po_misc_or_fail (parse_address (&str, info));
7042 if (!info->addr.postind || !info->addr.writeback)
7043 {
7044 set_syntax_error (_("invalid addressing mode"));
7045 goto failure;
7046 }
7047 if (!info->addr.offset.is_reg)
7048 {
7049 if (inst.reloc.exp.X_op == O_constant)
7050 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7051 else
7052 {
7053 set_fatal_syntax_error
7054 (_("writeback value must be an immediate constant"));
7055 goto failure;
7056 }
7057 }
7058 /* No qualifier. */
7059 break;
7060
7061 case AARCH64_OPND_SME_SM_ZA:
7062 /* { SM | ZA } */
7063 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7064 {
7065 set_syntax_error (_("unknown or missing PSTATE field name"));
7066 goto failure;
7067 }
7068 info->reg.regno = val;
7069 break;
7070
7071 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7072 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7073 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7074 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7075 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7076 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7077 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7078 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7079 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7080 case AARCH64_OPND_SVE_ADDR_RI_U6:
7081 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7082 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7083 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7084 /* [X<n>{, #imm, MUL VL}]
7085 [X<n>{, #imm}]
7086 but recognizing SVE registers. */
7087 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7088 &offset_qualifier));
7089 if (base_qualifier != AARCH64_OPND_QLF_X)
7090 {
7091 set_syntax_error (_("invalid addressing mode"));
7092 goto failure;
7093 }
7094 sve_regimm:
7095 if (info->addr.pcrel || info->addr.offset.is_reg
7096 || !info->addr.preind || info->addr.writeback)
7097 {
7098 set_syntax_error (_("invalid addressing mode"));
7099 goto failure;
7100 }
7101 if (inst.reloc.type != BFD_RELOC_UNUSED
7102 || inst.reloc.exp.X_op != O_constant)
7103 {
7104 /* Make sure this has priority over
7105 "invalid addressing mode". */
7106 set_fatal_syntax_error (_("constant offset required"));
7107 goto failure;
7108 }
7109 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7110 break;
7111
7112 case AARCH64_OPND_SVE_ADDR_R:
7113 /* [<Xn|SP>{, <R><m>}]
7114 but recognizing SVE registers. */
7115 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7116 &offset_qualifier));
7117 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7118 {
7119 offset_qualifier = AARCH64_OPND_QLF_X;
7120 info->addr.offset.is_reg = 1;
7121 info->addr.offset.regno = 31;
7122 }
7123 else if (base_qualifier != AARCH64_OPND_QLF_X
7124 || offset_qualifier != AARCH64_OPND_QLF_X)
7125 {
7126 set_syntax_error (_("invalid addressing mode"));
7127 goto failure;
7128 }
7129 goto regoff_addr;
7130
7131 case AARCH64_OPND_SVE_ADDR_RR:
7132 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7133 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7134 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7135 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7136 case AARCH64_OPND_SVE_ADDR_RX:
7137 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7138 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7139 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7140 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7141 but recognizing SVE registers. */
7142 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7143 &offset_qualifier));
7144 if (base_qualifier != AARCH64_OPND_QLF_X
7145 || offset_qualifier != AARCH64_OPND_QLF_X)
7146 {
7147 set_syntax_error (_("invalid addressing mode"));
7148 goto failure;
7149 }
7150 goto regoff_addr;
7151
7152 case AARCH64_OPND_SVE_ADDR_RZ:
7153 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7154 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7155 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7156 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7157 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7158 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7159 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7160 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7161 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7162 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7163 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7164 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7165 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7166 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7167 &offset_qualifier));
7168 if (base_qualifier != AARCH64_OPND_QLF_X
7169 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7170 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7171 {
7172 set_syntax_error (_("invalid addressing mode"));
7173 goto failure;
7174 }
7175 info->qualifier = offset_qualifier;
7176 goto regoff_addr;
7177
7178 case AARCH64_OPND_SVE_ADDR_ZX:
7179 /* [Zn.<T>{, <Xm>}]. */
7180 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7181 &offset_qualifier));
7182 /* Things to check:
7183 base_qualifier either S_S or S_D
7184 offset_qualifier must be X
7185 */
7186 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7187 && base_qualifier != AARCH64_OPND_QLF_S_D)
7188 || offset_qualifier != AARCH64_OPND_QLF_X)
7189 {
7190 set_syntax_error (_("invalid addressing mode"));
7191 goto failure;
7192 }
7193 info->qualifier = base_qualifier;
7194 if (!info->addr.offset.is_reg || info->addr.pcrel
7195 || !info->addr.preind || info->addr.writeback
7196 || info->shifter.operator_present != 0)
7197 {
7198 set_syntax_error (_("invalid addressing mode"));
7199 goto failure;
7200 }
7201 info->shifter.kind = AARCH64_MOD_LSL;
7202 break;
7203
7204
7205 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7206 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7207 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7208 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7209 /* [Z<n>.<T>{, #imm}] */
7210 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7211 &offset_qualifier));
7212 if (base_qualifier != AARCH64_OPND_QLF_S_S
7213 && base_qualifier != AARCH64_OPND_QLF_S_D)
7214 {
7215 set_syntax_error (_("invalid addressing mode"));
7216 goto failure;
7217 }
7218 info->qualifier = base_qualifier;
7219 goto sve_regimm;
7220
7221 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7222 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7223 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7224 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7225 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7226
7227 We don't reject:
7228
7229 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7230
7231 here since we get better error messages by leaving it to
7232 the qualifier checking routines. */
7233 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7234 &offset_qualifier));
7235 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7236 && base_qualifier != AARCH64_OPND_QLF_S_D)
7237 || offset_qualifier != base_qualifier)
7238 {
7239 set_syntax_error (_("invalid addressing mode"));
7240 goto failure;
7241 }
7242 info->qualifier = base_qualifier;
7243 goto regoff_addr;
7244
7245 case AARCH64_OPND_SYSREG:
7246 {
7247 uint32_t sysreg_flags;
7248 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7249 &sysreg_flags)) == PARSE_FAIL)
7250 {
7251 set_syntax_error (_("unknown or missing system register name"));
7252 goto failure;
7253 }
7254 inst.base.operands[i].sysreg.value = val;
7255 inst.base.operands[i].sysreg.flags = sysreg_flags;
7256 break;
7257 }
7258
7259 case AARCH64_OPND_PSTATEFIELD:
7260 {
7261 uint32_t sysreg_flags;
7262 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7263 &sysreg_flags)) == PARSE_FAIL)
7264 {
7265 set_syntax_error (_("unknown or missing PSTATE field name"));
7266 goto failure;
7267 }
7268 inst.base.operands[i].pstatefield = val;
7269 inst.base.operands[i].sysreg.flags = sysreg_flags;
7270 break;
7271 }
7272
7273 case AARCH64_OPND_SYSREG_IC:
7274 inst.base.operands[i].sysins_op =
7275 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7276 goto sys_reg_ins;
7277
7278 case AARCH64_OPND_SYSREG_DC:
7279 inst.base.operands[i].sysins_op =
7280 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7281 goto sys_reg_ins;
7282
7283 case AARCH64_OPND_SYSREG_AT:
7284 inst.base.operands[i].sysins_op =
7285 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7286 goto sys_reg_ins;
7287
7288 case AARCH64_OPND_SYSREG_SR:
7289 inst.base.operands[i].sysins_op =
7290 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7291 goto sys_reg_ins;
7292
7293 case AARCH64_OPND_SYSREG_TLBI:
7294 inst.base.operands[i].sysins_op =
7295 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7296 sys_reg_ins:
7297 if (inst.base.operands[i].sysins_op == NULL)
7298 {
7299 set_fatal_syntax_error ( _("unknown or missing operation name"));
7300 goto failure;
7301 }
7302 break;
7303
7304 case AARCH64_OPND_BARRIER:
7305 case AARCH64_OPND_BARRIER_ISB:
7306 val = parse_barrier (&str);
7307 if (val != PARSE_FAIL
7308 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7309 {
7310 /* ISB only accepts the option name 'sy'. */
7311 set_syntax_error
7312 (_("the specified option is not accepted in ISB"));
7313 /* Turn off backtrack as this optional operand is present. */
7314 backtrack_pos = 0;
7315 goto failure;
7316 }
7317 if (val != PARSE_FAIL
7318 && operands[i] == AARCH64_OPND_BARRIER)
7319 {
7320 /* Regular barriers accept CRm options (C0-C15).
7321 The DSB nXS barrier variant accepts values > 15. */
7322 if (val < 0 || val > 15)
7323 {
7324 set_syntax_error (_("the specified option is not accepted in DSB"));
7325 goto failure;
7326 }
7327 }
7328 /* This is an extension to accept a 0..15 immediate. */
7329 if (val == PARSE_FAIL)
7330 po_imm_or_fail (0, 15);
7331 info->barrier = aarch64_barrier_options + val;
7332 break;
7333
7334 case AARCH64_OPND_BARRIER_DSB_NXS:
7335 val = parse_barrier (&str);
7336 if (val != PARSE_FAIL)
7337 {
7338 /* The DSB nXS barrier variant accepts only <option>nXS qualifiers. */
7339 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7340 {
7341 set_syntax_error (_("the specified option is not accepted in DSB"));
7342 /* Turn off backtrack as this optional operand is present. */
7343 backtrack_pos = 0;
7344 goto failure;
7345 }
7346 }
7347 else
7348 {
7349 /* The DSB nXS barrier variant accepts a 5-bit unsigned immediate, with
7350 possible values 16, 20, 24 or 28, encoded in val<3:2>. */
7351 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7352 goto failure;
7353 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7354 {
7355 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7356 goto failure;
7357 }
7358 }
7359 /* The option index is encoded as a 2-bit value in val<3:2>. */
7360 val = (val >> 2) - 4;
7361 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7362 break;
7363
7364 case AARCH64_OPND_PRFOP:
7365 val = parse_pldop (&str);
7366 /* This is an extension to accept a 0..31 immediate. */
7367 if (val == PARSE_FAIL)
7368 po_imm_or_fail (0, 31);
7369 inst.base.operands[i].prfop = aarch64_prfops + val;
7370 break;
7371
7372 case AARCH64_OPND_BARRIER_PSB:
7373 val = parse_barrier_psb (&str, &(info->hint_option));
7374 if (val == PARSE_FAIL)
7375 goto failure;
7376 break;
7377
7378 case AARCH64_OPND_BTI_TARGET:
7379 val = parse_bti_operand (&str, &(info->hint_option));
7380 if (val == PARSE_FAIL)
7381 goto failure;
7382 break;
7383
7384 case AARCH64_OPND_SME_ZAda_2b:
7385 case AARCH64_OPND_SME_ZAda_3b:
7386 val = parse_sme_zada_operand (&str, &qualifier);
7387 if (val == PARSE_FAIL)
7388 goto failure;
7389 info->reg.regno = val;
7390 info->qualifier = qualifier;
7391 break;
7392
7393 case AARCH64_OPND_SME_ZA_HV_idx_src:
7394 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7395 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7396 {
7397 enum sme_hv_slice slice_indicator;
7398 int vector_select_register;
7399 int imm;
7400
7401 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7402 val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7403 &slice_indicator,
7404 &vector_select_register,
7405 &imm,
7406 &qualifier);
7407 else
7408 val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7409 &vector_select_register,
7410 &imm,
7411 &qualifier);
7412 if (val == PARSE_FAIL)
7413 goto failure;
7414 info->za_tile_vector.regno = val;
7415 info->za_tile_vector.index.regno = vector_select_register;
7416 info->za_tile_vector.index.imm = imm;
7417 info->za_tile_vector.v = slice_indicator;
7418 info->qualifier = qualifier;
7419 break;
7420 }
7421
7422 case AARCH64_OPND_SME_list_of_64bit_tiles:
7423 val = parse_sme_list_of_64bit_tiles (&str);
7424 if (val == PARSE_FAIL)
7425 goto failure;
7426 info->imm.value = val;
7427 break;
7428
7429 case AARCH64_OPND_SME_ZA_array:
7430 {
7431 int imm;
7432 val = parse_sme_za_array (&str, &imm);
7433 if (val == PARSE_FAIL)
7434 goto failure;
7435 info->za_tile_vector.index.regno = val;
7436 info->za_tile_vector.index.imm = imm;
7437 break;
7438 }
7439
7440 default:
7441 as_fatal (_("unhandled operand code %d"), operands[i]);
7442 }
7443
7444 /* If we get here, this operand was successfully parsed. */
7445 inst.base.operands[i].present = 1;
7446 continue;
7447
7448 failure:
7449 /* The parse routine should already have set the error, but in case
7450 not, set a default one here. */
7451 if (! error_p ())
7452 set_default_error ();
7453
7454 if (! backtrack_pos)
7455 goto parse_operands_return;
7456
7457 {
7458 /* We reach here because this operand is marked as optional, and
7459 either no operand was supplied or the operand was supplied but it
7460 was syntactically incorrect. In the latter case we report an
7461 error. In the former case we perform a few more checks before
7462 dropping through to the code to insert the default operand. */
7463
7464 char *tmp = backtrack_pos;
7465 char endchar = END_OF_INSN;
7466
7467 if (i != (aarch64_num_of_operands (opcode) - 1))
7468 endchar = ',';
7469 skip_past_char (&tmp, ',');
7470
7471 if (*tmp != endchar)
7472 /* The user has supplied an operand in the wrong format. */
7473 goto parse_operands_return;
7474
7475 /* Make sure there is not a comma before the optional operand.
7476 For example the fifth operand of 'sys' is optional:
7477
7478 sys #0,c0,c0,#0, <--- wrong
7479 sys #0,c0,c0,#0 <--- correct. */
7480 if (comma_skipped_p && i && endchar == END_OF_INSN)
7481 {
7482 set_fatal_syntax_error
7483 (_("unexpected comma before the omitted optional operand"));
7484 goto parse_operands_return;
7485 }
7486 }
7487
7488 /* Reaching here means we are dealing with an optional operand that is
7489 omitted from the assembly line. */
7490 gas_assert (optional_operand_p (opcode, i));
7491 info->present = 0;
7492 process_omitted_operand (operands[i], opcode, i, info);
7493
7494 /* Try again, skipping the optional operand at backtrack_pos. */
7495 str = backtrack_pos;
7496 backtrack_pos = 0;
7497
7498 /* Clear any error record after the omitted optional operand has been
7499 successfully handled. */
7500 clear_error ();
7501 }
7502
7503 /* Check if we have parsed all the operands. */
7504 if (*str != '\0' && ! error_p ())
7505 {
7506 /* Set I to the index of the last present operand; this is
7507 for the purpose of diagnostics. */
7508 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7509 ;
7510 set_fatal_syntax_error
7511 (_("unexpected characters following instruction"));
7512 }
7513
7514 parse_operands_return:
7515
7516 if (error_p ())
7517 {
7518 DEBUG_TRACE ("parsing FAIL: %s - %s",
7519 operand_mismatch_kind_names[get_error_kind ()],
7520 get_error_message ());
7521 /* Record the operand error properly; this is useful when there
7522 are multiple instruction templates for a mnemonic name, so that
7523 later on, we can select the error that most closely describes
7524 the problem. */
7525 record_operand_error (opcode, i, get_error_kind (),
7526 get_error_message ());
7527 return false;
7528 }
7529 else
7530 {
7531 DEBUG_TRACE ("parsing SUCCESS");
7532 return true;
7533 }
7534 }
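/* For example, a bare "isb" reaches the failure path for its optional
   barrier operand, backtracks, and lets process_omitted_operand supply the
   default option, while "isb sy" parses the option directly.  */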
7535
7536 /* Perform some fix-ups to provide programmer-friendly features while
7537 keeping libopcodes happy, i.e. libopcodes only accepts
7538 the preferred architectural syntax.
7539 Return FALSE if there is any failure; otherwise return TRUE. */
7540
7541 static bool
7542 programmer_friendly_fixup (aarch64_instruction *instr)
7543 {
7544 aarch64_inst *base = &instr->base;
7545 const aarch64_opcode *opcode = base->opcode;
7546 enum aarch64_op op = opcode->op;
7547 aarch64_opnd_info *operands = base->operands;
7548
7549 DEBUG_TRACE ("enter");
7550
7551 switch (opcode->iclass)
7552 {
7553 case testbranch:
7554 /* TBNZ Xn|Wn, #uimm6, label
7555 Test and Branch Not Zero: conditionally jumps to label if bit number
7556 uimm6 in register Xn is not zero. The bit number implies the width of
7557 the register, which may be written and should be disassembled as Wn if
7558 uimm is less than 32. */
7559 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
7560 {
7561 if (operands[1].imm.value >= 32)
7562 {
7563 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
7564 0, 31);
7565 return false;
7566 }
7567 operands[0].qualifier = AARCH64_OPND_QLF_X;
7568 }
7569 break;
7570 case loadlit:
7571 /* LDR Wt, label | =value
7572 As a convenience, assemblers will typically permit the notation
7573 "=value" in conjunction with the pc-relative literal load instructions
7574 to automatically place an immediate value or symbolic address in a
7575 nearby literal pool and generate a hidden label which references it.
7576 ISREG has been set to 0 in the case of =value. */
7577 if (instr->gen_lit_pool
7578 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
7579 {
7580 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
7581 if (op == OP_LDRSW_LIT)
7582 size = 4;
7583 if (instr->reloc.exp.X_op != O_constant
7584 && instr->reloc.exp.X_op != O_big
7585 && instr->reloc.exp.X_op != O_symbol)
7586 {
7587 record_operand_error (opcode, 1,
7588 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
7589 _("constant expression expected"));
7590 return false;
7591 }
7592 if (! add_to_lit_pool (&instr->reloc.exp, size))
7593 {
7594 record_operand_error (opcode, 1,
7595 AARCH64_OPDE_OTHER_ERROR,
7596 _("literal pool insertion failed"));
7597 return false;
7598 }
7599 }
7600 break;
7601 case log_shift:
7602 case bitfield:
7603 /* UXT[BHW] Wd, Wn
7604 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
7605 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
7606 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
7607 A programmer-friendly assembler should accept a destination Xd in
7608 place of Wd, however that is not the preferred form for disassembly.
7609 */
7610 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
7611 && operands[1].qualifier == AARCH64_OPND_QLF_W
7612 && operands[0].qualifier == AARCH64_OPND_QLF_X)
7613 operands[0].qualifier = AARCH64_OPND_QLF_W;
7614 break;
7615
7616 case addsub_ext:
7617 {
7618 /* In the 64-bit form, the final register operand is written as Wm
7619 for all but the (possibly omitted) UXTX/LSL and SXTX
7620 operators.
7621 As a programmer-friendly assembler, we accept e.g.
7622 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
7623 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
7624 int idx = aarch64_operand_index (opcode->operands,
7625 AARCH64_OPND_Rm_EXT);
7626 gas_assert (idx == 1 || idx == 2);
7627 if (operands[0].qualifier == AARCH64_OPND_QLF_X
7628 && operands[idx].qualifier == AARCH64_OPND_QLF_X
7629 && operands[idx].shifter.kind != AARCH64_MOD_LSL
7630 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
7631 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
7632 operands[idx].qualifier = AARCH64_OPND_QLF_W;
7633 }
7634 break;
7635
7636 default:
7637 break;
7638 }
7639
7640 DEBUG_TRACE ("exit with SUCCESS");
7641 return true;
7642 }
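/* For example, "uxtb x1, w2" is accepted and quietly rewritten with a W
   destination qualifier, and "tbz w0, #3, label" is rewritten to the X
   form that libopcodes expects.  */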
7643
7644 /* Check for loads and stores that will cause unpredictable behavior. */
7645
7646 static void
7647 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
7648 {
7649 aarch64_inst *base = &instr->base;
7650 const aarch64_opcode *opcode = base->opcode;
7651 const aarch64_opnd_info *opnds = base->operands;
7652 switch (opcode->iclass)
7653 {
7654 case ldst_pos:
7655 case ldst_imm9:
7656 case ldst_imm10:
7657 case ldst_unscaled:
7658 case ldst_unpriv:
7659 /* Loading/storing the base register is unpredictable if writeback. */
7660 if ((aarch64_get_operand_class (opnds[0].type)
7661 == AARCH64_OPND_CLASS_INT_REG)
7662 && opnds[0].reg.regno == opnds[1].addr.base_regno
7663 && opnds[1].addr.base_regno != REG_SP
7664 /* Exempt STG/STZG/ST2G/STZ2G. */
7665 && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
7666 && opnds[1].addr.writeback)
7667 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
7668 break;
7669
7670 case ldstpair_off:
7671 case ldstnapair_offs:
7672 case ldstpair_indexed:
7673 /* Loading/storing the base register is unpredictable if writeback. */
7674 if ((aarch64_get_operand_class (opnds[0].type)
7675 == AARCH64_OPND_CLASS_INT_REG)
7676 && (opnds[0].reg.regno == opnds[2].addr.base_regno
7677 || opnds[1].reg.regno == opnds[2].addr.base_regno)
7678 && opnds[2].addr.base_regno != REG_SP
7679 /* Exempt STGP. */
7680 && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
7681 && opnds[2].addr.writeback)
7682 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
7683 /* Load operations must load different registers. */
7684 if ((opcode->opcode & (1 << 22))
7685 && opnds[0].reg.regno == opnds[1].reg.regno)
7686 as_warn (_("unpredictable load of register pair -- `%s'"), str);
7687 break;
7688
7689 case ldstexcl:
7690 if ((aarch64_get_operand_class (opnds[0].type)
7691 == AARCH64_OPND_CLASS_INT_REG)
7692 && (aarch64_get_operand_class (opnds[1].type)
7693 == AARCH64_OPND_CLASS_INT_REG))
7694 {
7695 if ((opcode->opcode & (1 << 22)))
7696 {
7697 /* A load-exclusive pair is unpredictable if Rt == Rt2. */
7698 if ((opcode->opcode & (1 << 21))
7699 && opnds[0].reg.regno == opnds[1].reg.regno)
7700 as_warn (_("unpredictable load of register pair -- `%s'"), str);
7701 }
7702 else
7703 {
7704 /* Store-Exclusive is unpredictable if Rt == Rs. */
7705 if (opnds[0].reg.regno == opnds[1].reg.regno)
7706 as_warn
7707 (_("unpredictable: identical transfer and status registers"
7708 " --`%s'"),str);
7709
7710 if (opnds[0].reg.regno == opnds[2].reg.regno)
7711 {
7712 if (!(opcode->opcode & (1 << 21)))
7713 /* Store-Exclusive is unpredictable if Rn == Rs. */
7714 as_warn
7715 (_("unpredictable: identical base and status registers"
7716 " --`%s'"),str);
7717 else
7718 /* Store-Exclusive pair is unpredictable if Rt2 == Rs. */
7719 as_warn
7720 (_("unpredictable: "
7721 "identical transfer and status registers"
7722 " --`%s'"),str);
7723 }
7724
7725 /* Store-Exclusive pair is unpredictable if Rn == Rs. */
7726 if ((opcode->opcode & (1 << 21))
7727 && opnds[0].reg.regno == opnds[3].reg.regno
7728 && opnds[3].reg.regno != REG_SP)
7729 as_warn (_("unpredictable: identical base and status registers"
7730 " --`%s'"),str);
7731 }
7732 }
7733 break;
7734
7735 default:
7736 break;
7737 }
7738 }
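/* For example, "ldr x0, [x0], #8" (writeback to the transfer register) and
   "ldp x0, x0, [x1]" (a load pair targeting the same register twice) both
   draw the warnings above.  */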
7739
7740 static void
7741 force_automatic_sequence_close (void)
7742 {
7743 if (now_instr_sequence.instr)
7744 {
7745 as_warn (_("previous `%s' sequence has not been closed"),
7746 now_instr_sequence.instr->opcode->name);
7747 init_insn_sequence (NULL, &now_instr_sequence);
7748 }
7749 }
7750
7751 /* A wrapper function to interface with libopcodes on encoding and
7752 record the error message if there is any.
7753
7754 Return TRUE on success; otherwise return FALSE. */
7755
7756 static bool
7757 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7758 aarch64_insn *code)
7759 {
7760 aarch64_operand_error error_info;
7761 memset (&error_info, '\0', sizeof (error_info));
7762 error_info.kind = AARCH64_OPDE_NIL;
7763 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7764 && !error_info.non_fatal)
7765 return true;
7766
7767 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7768 record_operand_error_info (opcode, &error_info);
7769 return error_info.non_fatal;
7770 }
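/* Note that when only non-fatal constraint violations are recorded the
   encoding succeeds and TRUE is returned, so md_assemble still emits the
   instruction and issues the queued diagnostics as non-fatal messages.  */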
7771
7772 #ifdef DEBUG_AARCH64
7773 static inline void
7774 dump_opcode_operands (const aarch64_opcode *opcode)
7775 {
7776 int i = 0;
7777 while (opcode->operands[i] != AARCH64_OPND_NIL)
7778 {
7779 aarch64_verbose ("\t\t opnd%d: %s", i,
7780 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7781 ? aarch64_get_operand_name (opcode->operands[i])
7782 : aarch64_get_operand_desc (opcode->operands[i]));
7783 ++i;
7784 }
7785 }
7786 #endif /* DEBUG_AARCH64 */
7787
7788 /* This is the guts of the machine-dependent assembler. STR points to a
7789 machine dependent instruction. This function is supposed to emit
7790 the frags/bytes it assembles to. */
7791
7792 void
7793 md_assemble (char *str)
7794 {
7795 char *p = str;
7796 templates *template;
7797 const aarch64_opcode *opcode;
7798 aarch64_inst *inst_base;
7799 unsigned saved_cond;
7800
7801 /* Align the previous label if needed. */
7802 if (last_label_seen != NULL)
7803 {
7804 symbol_set_frag (last_label_seen, frag_now);
7805 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
7806 S_SET_SEGMENT (last_label_seen, now_seg);
7807 }
7808
7809 /* Update the current insn_sequence from the segment. */
7810 insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;
7811
7812 inst.reloc.type = BFD_RELOC_UNUSED;
7813
7814 DEBUG_TRACE ("\n\n");
7815 DEBUG_TRACE ("==============================");
7816 DEBUG_TRACE ("Enter md_assemble with %s", str);
7817
7818 template = opcode_lookup (&p);
7819 if (!template)
7820 {
7821 /* It wasn't an instruction, but it might be a register alias created
7822 by an "alias .req reg" directive. */
7823 if (!create_register_alias (str, p))
7824 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
7825 str);
7826 return;
7827 }
7828
7829 skip_whitespace (p);
7830 if (*p == ',')
7831 {
7832 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
7833 get_mnemonic_name (str), str);
7834 return;
7835 }
7836
7837 init_operand_error_report ();
7838
7839 /* Sections are assumed to start aligned. In an executable section, there
7840 is no MAP_DATA symbol pending. So we only align the address during the
7841 MAP_DATA --> MAP_INSN transition.
7842 For other sections, this is not guaranteed. */
7843 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
7844 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
7845 frag_align_code (2, 0);
7846
7847 saved_cond = inst.cond;
7848 reset_aarch64_instruction (&inst);
7849 inst.cond = saved_cond;
7850
7851 /* Iterate through all opcode entries with the same mnemonic name. */
7852 do
7853 {
7854 opcode = template->opcode;
7855
7856 DEBUG_TRACE ("opcode %s found", opcode->name);
7857 #ifdef DEBUG_AARCH64
7858 if (debug_dump)
7859 dump_opcode_operands (opcode);
7860 #endif /* DEBUG_AARCH64 */
7861
7862 mapping_state (MAP_INSN);
7863
7864 inst_base = &inst.base;
7865 inst_base->opcode = opcode;
7866
7867 /* Truly conditionally executed instructions, e.g. b.cond. */
7868 if (opcode->flags & F_COND)
7869 {
7870 gas_assert (inst.cond != COND_ALWAYS);
7871 inst_base->cond = get_cond_from_value (inst.cond);
7872 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
7873 }
7874 else if (inst.cond != COND_ALWAYS)
7875 {
7876 /* We shouldn't arrive here: the assembly looks like a
7877 conditional instruction but the opcode found is unconditional. */
7878 gas_assert (0);
7879 continue;
7880 }
7881
7882 if (parse_operands (p, opcode)
7883 && programmer_friendly_fixup (&inst)
7884 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
7885 {
7886 /* Check that this instruction is supported for this CPU. */
7887 if (!opcode->avariant
7888 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
7889 {
7890 as_bad (_("selected processor does not support `%s'"), str);
7891 return;
7892 }
7893
7894 warn_unpredictable_ldst (&inst, str);
7895
7896 if (inst.reloc.type == BFD_RELOC_UNUSED
7897 || !inst.reloc.need_libopcodes_p)
7898 output_inst (NULL);
7899 else
7900 {
7901 /* If there is a relocation generated for the instruction,
7902 store the instruction information for the future fix-up. */
7903 struct aarch64_inst *copy;
7904 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
7905 copy = XNEW (struct aarch64_inst);
7906 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
7907 output_inst (copy);
7908 }
7909
7910 /* Issue non-fatal messages if any. */
7911 output_operand_error_report (str, true);
7912 return;
7913 }
7914
7915 template = template->next;
7916 if (template != NULL)
7917 {
7918 reset_aarch64_instruction (&inst);
7919 inst.cond = saved_cond;
7920 }
7921 }
7922 while (template != NULL);
7923
7924 /* Issue the error messages if any. */
7925 output_operand_error_report (str, false);
7926 }
7927
7928 /* Various frobbings of labels and their addresses. */
7929
7930 void
7931 aarch64_start_line_hook (void)
7932 {
7933 last_label_seen = NULL;
7934 }
7935
7936 void
7937 aarch64_frob_label (symbolS * sym)
7938 {
7939 last_label_seen = sym;
7940
7941 dwarf2_emit_label (sym);
7942 }
7943
7944 void
7945 aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
7946 {
7947 /* Check to see if we have a block to close. */
7948 force_automatic_sequence_close ();
7949 }
7950
7951 int
7952 aarch64_data_in_code (void)
7953 {
7954 if (startswith (input_line_pointer + 1, "data:"))
7955 {
7956 *input_line_pointer = '/';
7957 input_line_pointer += 5;
7958 *input_line_pointer = 0;
7959 return 1;
7960 }
7961
7962 return 0;
7963 }
7964
7965 char *
7966 aarch64_canonicalize_symbol_name (char *name)
7967 {
7968 int len;
7969
7970 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
7971 *(name + len - 5) = 0;
7972
7973 return name;
7974 }
7975 \f
7976 /* Table of all register names defined by default. The user can
7977 define additional names with .req. Note that all register names
7978 should appear in both upper and lowercase variants. Some registers
7979 also have mixed-case names. */
7980
7981 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
7982 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
7983 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
7984 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
7985 #define REGSET16(p,t) \
7986 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
7987 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
7988 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
7989 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
7990 #define REGSET16S(p,s,t) \
7991 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
7992 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
7993 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
7994 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
7995 #define REGSET31(p,t) \
7996 REGSET16(p, t), \
7997 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
7998 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
7999 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8000 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8001 #define REGSET(p,t) \
8002 REGSET31(p,t), REGNUM(p,31,t)
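/* Illustrative note (not part of the original source): with the macros above,
   REGDEF (sp, 31, SP_64) expands to { "sp", 31, REG_TYPE_SP_64, true }, and
   REGSET16 (p, PN) emits the sixteen entries p0..p15 with type REG_TYPE_PN,
   which keeps the table below compact.  */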
8003
8004 /* These go into aarch64_reg_hsh hash-table. */
8005 static const reg_entry reg_names[] = {
8006 /* Integer registers. */
8007 REGSET31 (x, R_64), REGSET31 (X, R_64),
8008 REGSET31 (w, R_32), REGSET31 (W, R_32),
8009
8010 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
8011 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
8012 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
8013 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
8014 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
8015 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
8016
8017 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
8018 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
8019
8020 /* Floating-point single precision registers. */
8021 REGSET (s, FP_S), REGSET (S, FP_S),
8022
8023 /* Floating-point double precision registers. */
8024 REGSET (d, FP_D), REGSET (D, FP_D),
8025
8026 /* Floating-point half precision registers. */
8027 REGSET (h, FP_H), REGSET (H, FP_H),
8028
8029 /* Floating-point byte precision registers. */
8030 REGSET (b, FP_B), REGSET (B, FP_B),
8031
8032 /* Floating-point quad precision registers. */
8033 REGSET (q, FP_Q), REGSET (Q, FP_Q),
8034
8035 /* FP/SIMD registers. */
8036 REGSET (v, VN), REGSET (V, VN),
8037
8038 /* SVE vector registers. */
8039 REGSET (z, ZN), REGSET (Z, ZN),
8040
8041 /* SVE predicate registers. */
8042 REGSET16 (p, PN), REGSET16 (P, PN),
8043
8044 /* SME ZA tile registers. */
8045 REGSET16 (za, ZA), REGSET16 (ZA, ZA),
8046
8047 /* SME ZA tile registers (horizontal slice). */
8048 REGSET16S (za, h, ZAH), REGSET16S (ZA, H, ZAH),
8049
8050 /* SME ZA tile registers (vertical slice). */
8051 REGSET16S (za, v, ZAV), REGSET16S (ZA, V, ZAV)
8052 };
8053
8054 #undef REGDEF
8055 #undef REGDEF_ALIAS
8056 #undef REGNUM
8057 #undef REGSET16
8058 #undef REGSET31
8059 #undef REGSET
8060
8061 #define N 1
8062 #define n 0
8063 #define Z 1
8064 #define z 0
8065 #define C 1
8066 #define c 0
8067 #define V 1
8068 #define v 0
8069 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
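/* Worked example (illustrative, not part of the original source): with the
   helper values above, B (N, z, C, v) is (1 << 3) | (0 << 2) | (1 << 1) | 0
   == 0xa, the encoding stored for the "NzCv" entry below.  */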
8070 static const asm_nzcv nzcv_names[] = {
8071 {"nzcv", B (n, z, c, v)},
8072 {"nzcV", B (n, z, c, V)},
8073 {"nzCv", B (n, z, C, v)},
8074 {"nzCV", B (n, z, C, V)},
8075 {"nZcv", B (n, Z, c, v)},
8076 {"nZcV", B (n, Z, c, V)},
8077 {"nZCv", B (n, Z, C, v)},
8078 {"nZCV", B (n, Z, C, V)},
8079 {"Nzcv", B (N, z, c, v)},
8080 {"NzcV", B (N, z, c, V)},
8081 {"NzCv", B (N, z, C, v)},
8082 {"NzCV", B (N, z, C, V)},
8083 {"NZcv", B (N, Z, c, v)},
8084 {"NZcV", B (N, Z, c, V)},
8085 {"NZCv", B (N, Z, C, v)},
8086 {"NZCV", B (N, Z, C, V)}
8087 };
8088
8089 #undef N
8090 #undef n
8091 #undef Z
8092 #undef z
8093 #undef C
8094 #undef c
8095 #undef V
8096 #undef v
8097 #undef B
8098 \f
8099 /* MD interface: bits in the object file. */
8100
8101 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8102 for use in the a.out file, and store them in the array pointed to by buf.
8103 This knows about the endian-ness of the target machine and does
8104 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
8105 2 (short) and 4 (long). Floating-point numbers are put out as a series of
8106 LITTLENUMS (shorts, here at least). */
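/* Illustrative example (not part of the original source): on a little-endian
   target, md_number_to_chars (buf, 0xd503201f, 4) stores the bytes 0x1f,
   0x20, 0x03, 0xd5 in that order -- the same byte sequence used for the NOP
   padding pattern in aarch64_handle_align below.  */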
8107
8108 void
8109 md_number_to_chars (char *buf, valueT val, int n)
8110 {
8111 if (target_big_endian)
8112 number_to_chars_bigendian (buf, val, n);
8113 else
8114 number_to_chars_littleendian (buf, val, n);
8115 }
8116
8117 /* MD interface: Sections. */
8118
8119 /* Estimate the size of a frag before relaxing. Assume everything fits in
8120 4 bytes. */
8121
8122 int
8123 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
8124 {
8125 fragp->fr_var = 4;
8126 return 4;
8127 }
8128
8129 /* Round up a section size to the appropriate boundary. */
8130
8131 valueT
8132 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8133 {
8134 return size;
8135 }
8136
8137 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
8138 of an rs_align_code fragment.
8139
8140 Here we fill the frag with the appropriate info for padding the
8141 output stream. The resulting frag will consist of a fixed (fr_fix)
8142 and of a repeating (fr_var) part.
8143
8144 The fixed content is always emitted before the repeating content and
8145 these two parts are used as follows in constructing the output:
8146 - the fixed part will be used to align to a valid instruction word
8147 boundary, in case that we start at a misaligned address; as no
8148 executable instruction can live at the misaligned location, we
8149 simply fill with zeros;
8150 - the variable part will be used to cover the remaining padding and
8151 we fill using the AArch64 NOP instruction.
8152
8153 Note that the size of an RS_ALIGN_CODE fragment is always 7 to provide
8154 enough storage space for up to 3 bytes of padding back to a valid
8155 instruction alignment and exactly 4 bytes to store the NOP pattern. */
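/* Worked example (illustrative, not part of the original source): if 6 bytes
   of padding are needed, fix = 6 & 3 = 2, so two zero bytes form the fixed
   part and the remaining 4 bytes are covered by one repeat of the 4-byte NOP
   pattern in the variable part.  */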
8156
8157 void
8158 aarch64_handle_align (fragS * fragP)
8159 {
8160 /* NOP = d503201f */
8161 /* AArch64 instructions are always little-endian. */
8162 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
8163
8164 int bytes, fix, noop_size;
8165 char *p;
8166
8167 if (fragP->fr_type != rs_align_code)
8168 return;
8169
8170 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
8171 p = fragP->fr_literal + fragP->fr_fix;
8172
8173 #ifdef OBJ_ELF
8174 gas_assert (fragP->tc_frag_data.recorded);
8175 #endif
8176
8177 noop_size = sizeof (aarch64_noop);
8178
8179 fix = bytes & (noop_size - 1);
8180 if (fix)
8181 {
8182 #ifdef OBJ_ELF
8183 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
8184 #endif
8185 memset (p, 0, fix);
8186 p += fix;
8187 fragP->fr_fix += fix;
8188 }
8189
8190 if (noop_size)
8191 memcpy (p, aarch64_noop, noop_size);
8192 fragP->fr_var = noop_size;
8193 }
8194
8195 /* Perform target specific initialisation of a frag.
8196 Note - despite the name this initialisation is not done when the frag
8197 is created, but only when its type is assigned. A frag can be created
8198 and used a long time before its type is set, so beware of assuming that
8199 this initialisation is performed first. */
8200
8201 #ifndef OBJ_ELF
8202 void
8203 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
8204 int max_chars ATTRIBUTE_UNUSED)
8205 {
8206 }
8207
8208 #else /* OBJ_ELF is defined. */
8209 void
8210 aarch64_init_frag (fragS * fragP, int max_chars)
8211 {
8212 /* Record a mapping symbol for alignment frags. We will delete this
8213 later if the alignment ends up empty. */
8214 if (!fragP->tc_frag_data.recorded)
8215 fragP->tc_frag_data.recorded = 1;
8216
8217 /* PR 21809: Do not set a mapping state for debug sections
8218 - it just confuses other tools. */
8219 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
8220 return;
8221
8222 switch (fragP->fr_type)
8223 {
8224 case rs_align_test:
8225 case rs_fill:
8226 mapping_state_2 (MAP_DATA, max_chars);
8227 break;
8228 case rs_align:
8229 /* PR 20364: We can get alignment frags in code sections,
8230 so do not just assume that we should use the MAP_DATA state. */
8231 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
8232 break;
8233 case rs_align_code:
8234 mapping_state_2 (MAP_INSN, max_chars);
8235 break;
8236 default:
8237 break;
8238 }
8239 }
8240 \f
8241 /* Initialize the DWARF-2 unwind information for this procedure. */
8242
8243 void
8244 tc_aarch64_frame_initial_instructions (void)
8245 {
8246 cfi_add_CFA_def_cfa (REG_SP, 0);
8247 }
8248 #endif /* OBJ_ELF */
8249
8250 /* Convert REGNAME to a DWARF-2 register number. */
8251
8252 int
8253 tc_aarch64_regname_to_dw2regnum (char *regname)
8254 {
8255 const reg_entry *reg = parse_reg (&regname);
8256 if (reg == NULL)
8257 return -1;
8258
8259 switch (reg->type)
8260 {
8261 case REG_TYPE_SP_32:
8262 case REG_TYPE_SP_64:
8263 case REG_TYPE_R_32:
8264 case REG_TYPE_R_64:
8265 return reg->number;
8266
8267 case REG_TYPE_FP_B:
8268 case REG_TYPE_FP_H:
8269 case REG_TYPE_FP_S:
8270 case REG_TYPE_FP_D:
8271 case REG_TYPE_FP_Q:
8272 return reg->number + 64;
8273
8274 default:
8275 break;
8276 }
8277 return -1;
8278 }
8279
8280 /* Implement DWARF2_ADDR_SIZE. */
8281
8282 int
8283 aarch64_dwarf2_addr_size (void)
8284 {
8285 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8286 if (ilp32_p)
8287 return 4;
8288 #endif
8289 return bfd_arch_bits_per_address (stdoutput) / 8;
8290 }
8291
8292 /* MD interface: Symbol and relocation handling. */
8293
8294 /* Return the address within the segment that a PC-relative fixup is
8295 relative to. For AArch64, PC-relative fixups applied to instructions
8296 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8297
8298 long
8299 md_pcrel_from_section (fixS * fixP, segT seg)
8300 {
8301 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8302
8303 /* If this is pc-relative and we are going to emit a relocation
8304 then we just want to put out any pipeline compensation that the linker
8305 will need. Otherwise we want to use the calculated base. */
8306 if (fixP->fx_pcrel
8307 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8308 || aarch64_force_relocation (fixP)))
8309 base = 0;
8310
8311 /* AArch64 should be consistent for all pc-relative relocations. */
8312 return base + AARCH64_PCREL_OFFSET;
8313 }
8314
8315 /* Under ELF we need to provide a default for _GLOBAL_OFFSET_TABLE_.
8316 Otherwise we have no need to default the values of symbols. */
8317
8318 symbolS *
8319 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
8320 {
8321 #ifdef OBJ_ELF
8322 if (name[0] == '_' && name[1] == 'G'
8323 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
8324 {
8325 if (!GOT_symbol)
8326 {
8327 if (symbol_find (name))
8328 as_bad (_("GOT already in the symbol table"));
8329
8330 GOT_symbol = symbol_new (name, undefined_section,
8331 &zero_address_frag, 0);
8332 }
8333
8334 return GOT_symbol;
8335 }
8336 #endif
8337
8338 return 0;
8339 }
8340
8341 /* Return non-zero if the indicated VALUE has overflowed the maximum
8342 range expressible by an unsigned number with the indicated number of
8343 BITS. */
8344
8345 static bool
8346 unsigned_overflow (valueT value, unsigned bits)
8347 {
8348 valueT lim;
8349 if (bits >= sizeof (valueT) * 8)
8350 return false;
8351 lim = (valueT) 1 << bits;
8352 return (value >= lim);
8353 }
8354
8355
8356 /* Return non-zero if the indicated VALUE has overflowed the maximum
8357 range expressible by a signed number with the indicated number of
8358 BITS. */
8359
8360 static bool
8361 signed_overflow (offsetT value, unsigned bits)
8362 {
8363 offsetT lim;
8364 if (bits >= sizeof (offsetT) * 8)
8365 return false;
8366 lim = (offsetT) 1 << (bits - 1);
8367 return (value < -lim || value >= lim);
8368 }
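/* Illustrative example (not part of the original source): with BITS == 12,
   unsigned_overflow reports values outside 0..4095 and signed_overflow
   reports values outside -2048..2047, matching the 12-bit immediate fields
   handled further below.  */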
8369
8370 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
8371 unsigned immediate offset load/store instruction, try to encode it as
8372 an unscaled, 9-bit, signed immediate offset load/store instruction.
8373 Return TRUE if it is successful; otherwise return FALSE.
8374
8375 To be programmer-friendly, the assembler generates LDUR/STUR instructions
8376 in response to the standard LDR/STR mnemonics when the immediate offset is
8377 unambiguous, i.e. when it is negative or unaligned. */
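/* Illustrative example (not part of the original source): "str w0, [x1, #-1]"
   cannot be encoded in the scaled, unsigned-offset form, so this conversion
   lets it be assembled as "stur w0, [x1, #-1]" instead of being rejected.  */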
8378
8379 static bool
8380 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
8381 {
8382 int idx;
8383 enum aarch64_op new_op;
8384 const aarch64_opcode *new_opcode;
8385
8386 gas_assert (instr->opcode->iclass == ldst_pos);
8387
8388 switch (instr->opcode->op)
8389 {
8390 case OP_LDRB_POS:new_op = OP_LDURB; break;
8391 case OP_STRB_POS: new_op = OP_STURB; break;
8392 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
8393 case OP_LDRH_POS: new_op = OP_LDURH; break;
8394 case OP_STRH_POS: new_op = OP_STURH; break;
8395 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
8396 case OP_LDR_POS: new_op = OP_LDUR; break;
8397 case OP_STR_POS: new_op = OP_STUR; break;
8398 case OP_LDRF_POS: new_op = OP_LDURV; break;
8399 case OP_STRF_POS: new_op = OP_STURV; break;
8400 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
8401 case OP_PRFM_POS: new_op = OP_PRFUM; break;
8402 default: new_op = OP_NIL; break;
8403 }
8404
8405 if (new_op == OP_NIL)
8406 return false;
8407
8408 new_opcode = aarch64_get_opcode (new_op);
8409 gas_assert (new_opcode != NULL);
8410
8411 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
8412 instr->opcode->op, new_opcode->op);
8413
8414 aarch64_replace_opcode (instr, new_opcode);
8415
8416 /* Clear the ADDR_SIMM9 operand's qualifier; otherwise the
8417 qualifier matching may fail because the out-of-date qualifier will
8418 prevent the operand from being updated with a new and correct qualifier. */
8419 idx = aarch64_operand_index (instr->opcode->operands,
8420 AARCH64_OPND_ADDR_SIMM9);
8421 gas_assert (idx == 1);
8422 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
8423
8424 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
8425
8426 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
8427 insn_sequence))
8428 return false;
8429
8430 return true;
8431 }
8432
8433 /* Called by fix_insn to fix a MOV immediate alias instruction.
8434
8435 Operand for a generic move immediate instruction, which is an alias
8436 instruction that generates a single MOVZ, MOVN or ORR instruction to load
8437 a 32-bit/64-bit immediate value into a general register. An assembler error
8438 shall result if the immediate cannot be created by a single one of these
8439 instructions. If there is a choice, then to ensure reversibility an
8440 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
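/* Illustrative examples (not part of the original source; values chosen for
   exposition): a resolved value of 0x12340000 can be emitted as
   "movz x0, #0x1234, lsl #16"; a value of -1 falls back to "movn x0, #0";
   and 0x00ff00ff00ff00ff, a valid bitmask immediate that is not a single
   MOVZ/MOVN payload, is emitted via the ORR alias.  */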
8441
8442 static void
8443 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
8444 {
8445 const aarch64_opcode *opcode;
8446
8447 /* Need to check if the destination is SP/ZR. The check has to be done
8448 before any aarch64_replace_opcode. */
8449 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
8450 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
8451
8452 instr->operands[1].imm.value = value;
8453 instr->operands[1].skip = 0;
8454
8455 if (try_mov_wide_p)
8456 {
8457 /* Try the MOVZ alias. */
8458 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
8459 aarch64_replace_opcode (instr, opcode);
8460 if (aarch64_opcode_encode (instr->opcode, instr,
8461 &instr->value, NULL, NULL, insn_sequence))
8462 {
8463 put_aarch64_insn (buf, instr->value);
8464 return;
8465 }
8466 /* Try the MOVN alias. */
8467 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
8468 aarch64_replace_opcode (instr, opcode);
8469 if (aarch64_opcode_encode (instr->opcode, instr,
8470 &instr->value, NULL, NULL, insn_sequence))
8471 {
8472 put_aarch64_insn (buf, instr->value);
8473 return;
8474 }
8475 }
8476
8477 if (try_mov_bitmask_p)
8478 {
8479 /* Try the ORR alias. */
8480 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
8481 aarch64_replace_opcode (instr, opcode);
8482 if (aarch64_opcode_encode (instr->opcode, instr,
8483 &instr->value, NULL, NULL, insn_sequence))
8484 {
8485 put_aarch64_insn (buf, instr->value);
8486 return;
8487 }
8488 }
8489
8490 as_bad_where (fixP->fx_file, fixP->fx_line,
8491 _("immediate cannot be moved by a single instruction"));
8492 }
8493
8494 /* An immediate-related instruction operand may use a symbol in the
8495 assembly, e.g.
8496 
8497 mov w0, u32
8498 .set u32, 0x00ffff00
8499
8500 At the time when the assembly instruction is parsed, a referenced symbol,
8501 like 'u32' in the above example, may not have been seen; a fixS is created
8502 in such a case and is handled here after symbols have been resolved.
8503 The instruction is fixed up with VALUE using the information in *FIXP plus
8504 extra information in FLAGS.
8505
8506 This function is called by md_apply_fix to fix up instructions that need
8507 a fix-up described above but does not involve any linker-time relocation. */
8508
8509 static void
8510 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
8511 {
8512 int idx;
8513 uint32_t insn;
8514 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
8515 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
8516 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
8517
8518 if (new_inst)
8519 {
8520 /* Now the instruction is about to be fixed-up, so the operand that
8521 was previously marked as 'ignored' needs to be unmarked in order
8522 to get the encoding done properly. */
8523 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
8524 new_inst->operands[idx].skip = 0;
8525 }
8526
8527 gas_assert (opnd != AARCH64_OPND_NIL);
8528
8529 switch (opnd)
8530 {
8531 case AARCH64_OPND_EXCEPTION:
8532 case AARCH64_OPND_UNDEFINED:
8533 if (unsigned_overflow (value, 16))
8534 as_bad_where (fixP->fx_file, fixP->fx_line,
8535 _("immediate out of range"));
8536 insn = get_aarch64_insn (buf);
8537 insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
8538 put_aarch64_insn (buf, insn);
8539 break;
8540
8541 case AARCH64_OPND_AIMM:
8542 /* ADD or SUB with immediate.
8543 NOTE this assumes we come here with an add/sub shifted reg encoding
8544 3 322|2222|2 2 2 21111 111111
8545 1 098|7654|3 2 1 09876 543210 98765 43210
8546 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
8547 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
8548 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
8549 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
8550 ->
8551 3 322|2222|2 2 221111111111
8552 1 098|7654|3 2 109876543210 98765 43210
8553 11000000 sf 001|0001|shift imm12 Rn Rd ADD
8554 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
8555 51000000 sf 101|0001|shift imm12 Rn Rd SUB
8556 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
8557 Fields sf Rn Rd are already set. */
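/* Worked example (illustrative, not part of the original source): a resolved
   value of 0x5000 does not fit in 12 bits, but (0x5000 >> 12) << 12 == 0x5000,
   so it is encoded as 5 with the LSL #12 shifter set; a negative value such
   as -16 flips ADD to SUB (or vice versa) and encodes 16.  */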
8558 insn = get_aarch64_insn (buf);
8559 if (value < 0)
8560 {
8561 /* Add <-> sub. */
8562 insn = reencode_addsub_switch_add_sub (insn);
8563 value = -value;
8564 }
8565
8566 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
8567 && unsigned_overflow (value, 12))
8568 {
8569 /* Try to shift the value by 12 to make it fit. */
8570 if (((value >> 12) << 12) == value
8571 && ! unsigned_overflow (value, 12 + 12))
8572 {
8573 value >>= 12;
8574 insn |= encode_addsub_imm_shift_amount (1);
8575 }
8576 }
8577
8578 if (unsigned_overflow (value, 12))
8579 as_bad_where (fixP->fx_file, fixP->fx_line,
8580 _("immediate out of range"));
8581
8582 insn |= encode_addsub_imm (value);
8583
8584 put_aarch64_insn (buf, insn);
8585 break;
8586
8587 case AARCH64_OPND_SIMD_IMM:
8588 case AARCH64_OPND_SIMD_IMM_SFT:
8589 case AARCH64_OPND_LIMM:
8590 /* Bit mask immediate. */
8591 gas_assert (new_inst != NULL);
8592 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
8593 new_inst->operands[idx].imm.value = value;
8594 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
8595 &new_inst->value, NULL, NULL, insn_sequence))
8596 put_aarch64_insn (buf, new_inst->value);
8597 else
8598 as_bad_where (fixP->fx_file, fixP->fx_line,
8599 _("invalid immediate"));
8600 break;
8601
8602 case AARCH64_OPND_HALF:
8603 /* 16-bit unsigned immediate. */
8604 if (unsigned_overflow (value, 16))
8605 as_bad_where (fixP->fx_file, fixP->fx_line,
8606 _("immediate out of range"));
8607 insn = get_aarch64_insn (buf);
8608 insn |= encode_movw_imm (value & 0xffff);
8609 put_aarch64_insn (buf, insn);
8610 break;
8611
8612 case AARCH64_OPND_IMM_MOV:
8613 /* Operand for a generic move immediate instruction, which is
8614 an alias instruction that generates a single MOVZ, MOVN or ORR
8615 instruction to load a 32-bit/64-bit immediate value into a general
8616 register. An assembler error shall result if the immediate cannot be
8617 created by a single one of these instructions. If there is a choice,
8618 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
8619 and MOVZ or MOVN to ORR. */
8620 gas_assert (new_inst != NULL);
8621 fix_mov_imm_insn (fixP, buf, new_inst, value);
8622 break;
8623
8624 case AARCH64_OPND_ADDR_SIMM7:
8625 case AARCH64_OPND_ADDR_SIMM9:
8626 case AARCH64_OPND_ADDR_SIMM9_2:
8627 case AARCH64_OPND_ADDR_SIMM10:
8628 case AARCH64_OPND_ADDR_UIMM12:
8629 case AARCH64_OPND_ADDR_SIMM11:
8630 case AARCH64_OPND_ADDR_SIMM13:
8631 /* Immediate offset in an address. */
8632 insn = get_aarch64_insn (buf);
8633
8634 gas_assert (new_inst != NULL && new_inst->value == insn);
8635 gas_assert (new_inst->opcode->operands[1] == opnd
8636 || new_inst->opcode->operands[2] == opnd);
8637
8638 /* Get the index of the address operand. */
8639 if (new_inst->opcode->operands[1] == opnd)
8640 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
8641 idx = 1;
8642 else
8643 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
8644 idx = 2;
8645
8646 /* Update the resolved offset value. */
8647 new_inst->operands[idx].addr.offset.imm = value;
8648
8649 /* Encode/fix-up. */
8650 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
8651 &new_inst->value, NULL, NULL, insn_sequence))
8652 {
8653 put_aarch64_insn (buf, new_inst->value);
8654 break;
8655 }
8656 else if (new_inst->opcode->iclass == ldst_pos
8657 && try_to_encode_as_unscaled_ldst (new_inst))
8658 {
8659 put_aarch64_insn (buf, new_inst->value);
8660 break;
8661 }
8662
8663 as_bad_where (fixP->fx_file, fixP->fx_line,
8664 _("immediate offset out of range"));
8665 break;
8666
8667 default:
8668 gas_assert (0);
8669 as_fatal (_("unhandled operand code %d"), opnd);
8670 }
8671 }
8672
8673 /* Apply a fixup (fixP) to segment data, once it has been determined
8674 by our caller that we have all the info we need to fix it up.
8675
8676 Parameter valP is the pointer to the value of the bits. */
8677
8678 void
8679 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
8680 {
8681 offsetT value = *valP;
8682 uint32_t insn;
8683 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
8684 int scale;
8685 unsigned flags = fixP->fx_addnumber;
8686
8687 DEBUG_TRACE ("\n\n");
8688 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
8689 DEBUG_TRACE ("Enter md_apply_fix");
8690
8691 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
8692
8693 /* Note whether this will delete the relocation. */
8694
8695 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
8696 fixP->fx_done = 1;
8697
8698 /* Process the relocations. */
8699 switch (fixP->fx_r_type)
8700 {
8701 case BFD_RELOC_NONE:
8702 /* This will need to go in the object file. */
8703 fixP->fx_done = 0;
8704 break;
8705
8706 case BFD_RELOC_8:
8707 case BFD_RELOC_8_PCREL:
8708 if (fixP->fx_done || !seg->use_rela_p)
8709 md_number_to_chars (buf, value, 1);
8710 break;
8711
8712 case BFD_RELOC_16:
8713 case BFD_RELOC_16_PCREL:
8714 if (fixP->fx_done || !seg->use_rela_p)
8715 md_number_to_chars (buf, value, 2);
8716 break;
8717
8718 case BFD_RELOC_32:
8719 case BFD_RELOC_32_PCREL:
8720 if (fixP->fx_done || !seg->use_rela_p)
8721 md_number_to_chars (buf, value, 4);
8722 break;
8723
8724 case BFD_RELOC_64:
8725 case BFD_RELOC_64_PCREL:
8726 if (fixP->fx_done || !seg->use_rela_p)
8727 md_number_to_chars (buf, value, 8);
8728 break;
8729
8730 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
8731 /* We claim that these fixups have been processed here, even if
8732 in fact we generate an error because we do not have a reloc
8733 for them, so tc_gen_reloc() will reject them. */
8734 fixP->fx_done = 1;
8735 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
8736 {
8737 as_bad_where (fixP->fx_file, fixP->fx_line,
8738 _("undefined symbol %s used as an immediate value"),
8739 S_GET_NAME (fixP->fx_addsy));
8740 goto apply_fix_return;
8741 }
8742 fix_insn (fixP, flags, value);
8743 break;
8744
8745 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
8746 if (fixP->fx_done || !seg->use_rela_p)
8747 {
8748 if (value & 3)
8749 as_bad_where (fixP->fx_file, fixP->fx_line,
8750 _("pc-relative load offset not word aligned"));
8751 if (signed_overflow (value, 21))
8752 as_bad_where (fixP->fx_file, fixP->fx_line,
8753 _("pc-relative load offset out of range"));
8754 insn = get_aarch64_insn (buf);
8755 insn |= encode_ld_lit_ofs_19 (value >> 2);
8756 put_aarch64_insn (buf, insn);
8757 }
8758 break;
8759
8760 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
8761 if (fixP->fx_done || !seg->use_rela_p)
8762 {
8763 if (signed_overflow (value, 21))
8764 as_bad_where (fixP->fx_file, fixP->fx_line,
8765 _("pc-relative address offset out of range"));
8766 insn = get_aarch64_insn (buf);
8767 insn |= encode_adr_imm (value);
8768 put_aarch64_insn (buf, insn);
8769 }
8770 break;
8771
8772 case BFD_RELOC_AARCH64_BRANCH19:
8773 if (fixP->fx_done || !seg->use_rela_p)
8774 {
8775 if (value & 3)
8776 as_bad_where (fixP->fx_file, fixP->fx_line,
8777 _("conditional branch target not word aligned"));
8778 if (signed_overflow (value, 21))
8779 as_bad_where (fixP->fx_file, fixP->fx_line,
8780 _("conditional branch out of range"));
8781 insn = get_aarch64_insn (buf);
8782 insn |= encode_cond_branch_ofs_19 (value >> 2);
8783 put_aarch64_insn (buf, insn);
8784 }
8785 break;
8786
8787 case BFD_RELOC_AARCH64_TSTBR14:
8788 if (fixP->fx_done || !seg->use_rela_p)
8789 {
8790 if (value & 3)
8791 as_bad_where (fixP->fx_file, fixP->fx_line,
8792 _("conditional branch target not word aligned"));
8793 if (signed_overflow (value, 16))
8794 as_bad_where (fixP->fx_file, fixP->fx_line,
8795 _("conditional branch out of range"));
8796 insn = get_aarch64_insn (buf);
8797 insn |= encode_tst_branch_ofs_14 (value >> 2);
8798 put_aarch64_insn (buf, insn);
8799 }
8800 break;
8801
8802 case BFD_RELOC_AARCH64_CALL26:
8803 case BFD_RELOC_AARCH64_JUMP26:
8804 if (fixP->fx_done || !seg->use_rela_p)
8805 {
8806 if (value & 3)
8807 as_bad_where (fixP->fx_file, fixP->fx_line,
8808 _("branch target not word aligned"));
8809 if (signed_overflow (value, 28))
8810 as_bad_where (fixP->fx_file, fixP->fx_line,
8811 _("branch out of range"));
8812 insn = get_aarch64_insn (buf);
8813 insn |= encode_branch_ofs_26 (value >> 2);
8814 put_aarch64_insn (buf, insn);
8815 }
8816 break;
8817
8818 case BFD_RELOC_AARCH64_MOVW_G0:
8819 case BFD_RELOC_AARCH64_MOVW_G0_NC:
8820 case BFD_RELOC_AARCH64_MOVW_G0_S:
8821 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
8822 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8823 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
8824 scale = 0;
8825 goto movw_common;
8826 case BFD_RELOC_AARCH64_MOVW_G1:
8827 case BFD_RELOC_AARCH64_MOVW_G1_NC:
8828 case BFD_RELOC_AARCH64_MOVW_G1_S:
8829 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8830 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8831 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
8832 scale = 16;
8833 goto movw_common;
8834 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8835 scale = 0;
8836 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8837 /* Should always be exported to object file, see
8838 aarch64_force_relocation(). */
8839 gas_assert (!fixP->fx_done);
8840 gas_assert (seg->use_rela_p);
8841 goto movw_common;
8842 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8843 scale = 16;
8844 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8845 /* Should always be exported to object file, see
8846 aarch64_force_relocation(). */
8847 gas_assert (!fixP->fx_done);
8848 gas_assert (seg->use_rela_p);
8849 goto movw_common;
8850 case BFD_RELOC_AARCH64_MOVW_G2:
8851 case BFD_RELOC_AARCH64_MOVW_G2_NC:
8852 case BFD_RELOC_AARCH64_MOVW_G2_S:
8853 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8854 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
8855 scale = 32;
8856 goto movw_common;
8857 case BFD_RELOC_AARCH64_MOVW_G3:
8858 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
8859 scale = 48;
8860 movw_common:
8861 if (fixP->fx_done || !seg->use_rela_p)
8862 {
8863 insn = get_aarch64_insn (buf);
8864
8865 if (!fixP->fx_done)
8866 {
8867 /* REL signed addend must fit in 16 bits */
8868 if (signed_overflow (value, 16))
8869 as_bad_where (fixP->fx_file, fixP->fx_line,
8870 _("offset out of range"));
8871 }
8872 else
8873 {
8874 /* Check for overflow and scale. */
8875 switch (fixP->fx_r_type)
8876 {
8877 case BFD_RELOC_AARCH64_MOVW_G0:
8878 case BFD_RELOC_AARCH64_MOVW_G1:
8879 case BFD_RELOC_AARCH64_MOVW_G2:
8880 case BFD_RELOC_AARCH64_MOVW_G3:
8881 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8882 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8883 if (unsigned_overflow (value, scale + 16))
8884 as_bad_where (fixP->fx_file, fixP->fx_line,
8885 _("unsigned value out of range"));
8886 break;
8887 case BFD_RELOC_AARCH64_MOVW_G0_S:
8888 case BFD_RELOC_AARCH64_MOVW_G1_S:
8889 case BFD_RELOC_AARCH64_MOVW_G2_S:
8890 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8891 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8892 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8893 /* NOTE: We can only come here with movz or movn. */
8894 if (signed_overflow (value, scale + 16))
8895 as_bad_where (fixP->fx_file, fixP->fx_line,
8896 _("signed value out of range"));
8897 if (value < 0)
8898 {
8899 /* Force use of MOVN. */
8900 value = ~value;
8901 insn = reencode_movzn_to_movn (insn);
8902 }
8903 else
8904 {
8905 /* Force use of MOVZ. */
8906 insn = reencode_movzn_to_movz (insn);
8907 }
8908 break;
8909 default:
8910 /* Unchecked relocations. */
8911 break;
8912 }
8913 value >>= scale;
8914 }
8915
8916 /* Insert value into MOVN/MOVZ/MOVK instruction. */
8917 insn |= encode_movw_imm (value & 0xffff);
8918
8919 put_aarch64_insn (buf, insn);
8920 }
8921 break;
8922
8923 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8924 fixP->fx_r_type = (ilp32_p
8925 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
8926 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
8927 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8928 /* Should always be exported to object file, see
8929 aarch64_force_relocation(). */
8930 gas_assert (!fixP->fx_done);
8931 gas_assert (seg->use_rela_p);
8932 break;
8933
8934 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8935 fixP->fx_r_type = (ilp32_p
8936 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
8937 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
8938 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8939 /* Should always be exported to object file, see
8940 aarch64_force_relocation(). */
8941 gas_assert (!fixP->fx_done);
8942 gas_assert (seg->use_rela_p);
8943 break;
8944
8945 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8946 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8947 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8948 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8949 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8950 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8951 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8952 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8953 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8954 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8955 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8956 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8957 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8958 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8959 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8960 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8961 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8962 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8963 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8964 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8965 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8966 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8967 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8968 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8969 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8970 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8971 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8972 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8973 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8974 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8975 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8976 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8977 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8978 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8979 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8980 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8981 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8982 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8983 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8984 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8985 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8986 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8987 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8988 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8989 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8990 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8991 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8992 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8993 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8994 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8995 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8996 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8997 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8998 /* Should always be exported to object file, see
8999 aarch64_force_relocation(). */
9000 gas_assert (!fixP->fx_done);
9001 gas_assert (seg->use_rela_p);
9002 break;
9003
9004 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
9005 /* Should always be exported to object file, see
9006 aarch64_force_relocation(). */
9007 fixP->fx_r_type = (ilp32_p
9008 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
9009 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
9010 gas_assert (!fixP->fx_done);
9011 gas_assert (seg->use_rela_p);
9012 break;
9013
9014 case BFD_RELOC_AARCH64_ADD_LO12:
9015 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
9016 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
9017 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
9018 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
9019 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
9020 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
9021 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
9022 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
9023 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
9024 case BFD_RELOC_AARCH64_LDST128_LO12:
9025 case BFD_RELOC_AARCH64_LDST16_LO12:
9026 case BFD_RELOC_AARCH64_LDST32_LO12:
9027 case BFD_RELOC_AARCH64_LDST64_LO12:
9028 case BFD_RELOC_AARCH64_LDST8_LO12:
9029 /* Should always be exported to object file, see
9030 aarch64_force_relocation(). */
9031 gas_assert (!fixP->fx_done);
9032 gas_assert (seg->use_rela_p);
9033 break;
9034
9035 case BFD_RELOC_AARCH64_TLSDESC_ADD:
9036 case BFD_RELOC_AARCH64_TLSDESC_CALL:
9037 case BFD_RELOC_AARCH64_TLSDESC_LDR:
9038 break;
9039
9040 case BFD_RELOC_UNUSED:
9041 /* An error will already have been reported. */
9042 break;
9043
9044 default:
9045 as_bad_where (fixP->fx_file, fixP->fx_line,
9046 _("unexpected %s fixup"),
9047 bfd_get_reloc_code_name (fixP->fx_r_type));
9048 break;
9049 }
9050
9051 apply_fix_return:
9052 /* Free the allocated struct aarch64_inst.
9053 N.B. currently only a very limited number of fix-up types actually use
9054 this field, so the impact on performance should be minimal. */
9055 free (fixP->tc_fix_data.inst);
9056
9057 return;
9058 }
9059
9060 /* Translate internal representation of relocation info to BFD target
9061 format. */
9062
9063 arelent *
9064 tc_gen_reloc (asection * section, fixS * fixp)
9065 {
9066 arelent *reloc;
9067 bfd_reloc_code_real_type code;
9068
9069 reloc = XNEW (arelent);
9070
9071 reloc->sym_ptr_ptr = XNEW (asymbol *);
9072 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9073 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9074
9075 if (fixp->fx_pcrel)
9076 {
9077 if (section->use_rela_p)
9078 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9079 else
9080 fixp->fx_offset = reloc->address;
9081 }
9082 reloc->addend = fixp->fx_offset;
9083
9084 code = fixp->fx_r_type;
9085 switch (code)
9086 {
9087 case BFD_RELOC_16:
9088 if (fixp->fx_pcrel)
9089 code = BFD_RELOC_16_PCREL;
9090 break;
9091
9092 case BFD_RELOC_32:
9093 if (fixp->fx_pcrel)
9094 code = BFD_RELOC_32_PCREL;
9095 break;
9096
9097 case BFD_RELOC_64:
9098 if (fixp->fx_pcrel)
9099 code = BFD_RELOC_64_PCREL;
9100 break;
9101
9102 default:
9103 break;
9104 }
9105
9106 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9107 if (reloc->howto == NULL)
9108 {
9109 as_bad_where (fixp->fx_file, fixp->fx_line,
9110 _
9111 ("cannot represent %s relocation in this object file format"),
9112 bfd_get_reloc_code_name (code));
9113 return NULL;
9114 }
9115
9116 return reloc;
9117 }
9118
9119 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9120
9121 void
9122 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9123 {
9124 bfd_reloc_code_real_type type;
9125 int pcrel = 0;
9126
9127 /* Pick a reloc.
9128 FIXME: @@ Should look at CPU word size. */
9129 switch (size)
9130 {
9131 case 1:
9132 type = BFD_RELOC_8;
9133 break;
9134 case 2:
9135 type = BFD_RELOC_16;
9136 break;
9137 case 4:
9138 type = BFD_RELOC_32;
9139 break;
9140 case 8:
9141 type = BFD_RELOC_64;
9142 break;
9143 default:
9144 as_bad (_("cannot do %u-byte relocation"), size);
9145 type = BFD_RELOC_UNUSED;
9146 break;
9147 }
9148
9149 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9150 }
9151
9152 #ifdef OBJ_ELF
9153
9154 /* Implement md_after_parse_args. This is the earliest time we need to decide
9155 the ABI. If no -mabi is specified, the ABI is decided by the target triplet. */
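/* Illustrative note (not part of the original source): a toolchain configured
   with DEFAULT_ARCH "aarch64:32" therefore defaults to ILP32 here, while a
   plain "aarch64" default selects LP64.  */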
9156
9157 void
9158 aarch64_after_parse_args (void)
9159 {
9160 if (aarch64_abi != AARCH64_ABI_NONE)
9161 return;
9162
9163 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9164 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9165 aarch64_abi = AARCH64_ABI_ILP32;
9166 else
9167 aarch64_abi = AARCH64_ABI_LP64;
9168 }
9169
9170 const char *
9171 elf64_aarch64_target_format (void)
9172 {
9173 #ifdef TE_CLOUDABI
9174 /* FIXME: What to do for ilp32_p ? */
9175 if (target_big_endian)
9176 return "elf64-bigaarch64-cloudabi";
9177 else
9178 return "elf64-littleaarch64-cloudabi";
9179 #else
9180 if (target_big_endian)
9181 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9182 else
9183 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9184 #endif
9185 }
9186
9187 void
9188 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
9189 {
9190 elf_frob_symbol (symp, puntp);
9191 }
9192 #endif
9193
9194 /* MD interface: Finalization. */
9195
9196 /* A good place to do this, although this was probably not intended
9197 for this kind of use. We need to dump the literal pool before
9198 references are made to a null symbol pointer. */
9199
9200 void
9201 aarch64_cleanup (void)
9202 {
9203 literal_pool *pool;
9204
9205 for (pool = list_of_pools; pool; pool = pool->next)
9206 {
9207 /* Put it at the end of the relevant section. */
9208 subseg_set (pool->section, pool->sub_section);
9209 s_ltorg (0);
9210 }
9211 }
9212
9213 #ifdef OBJ_ELF
9214 /* Remove any excess mapping symbols generated for alignment frags in
9215 SEC. We may have created a mapping symbol before a zero byte
9216 alignment; remove it if there's a mapping symbol after the
9217 alignment. */
9218 static void
9219 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
9220 void *dummy ATTRIBUTE_UNUSED)
9221 {
9222 segment_info_type *seginfo = seg_info (sec);
9223 fragS *fragp;
9224
9225 if (seginfo == NULL || seginfo->frchainP == NULL)
9226 return;
9227
9228 for (fragp = seginfo->frchainP->frch_root;
9229 fragp != NULL; fragp = fragp->fr_next)
9230 {
9231 symbolS *sym = fragp->tc_frag_data.last_map;
9232 fragS *next = fragp->fr_next;
9233
9234 /* Variable-sized frags have been converted to fixed size by
9235 this point. But if this was variable-sized to start with,
9236 there will be a fixed-size frag after it. So don't handle
9237 next == NULL. */
9238 if (sym == NULL || next == NULL)
9239 continue;
9240
9241 if (S_GET_VALUE (sym) < next->fr_address)
9242 /* Not at the end of this frag. */
9243 continue;
9244 know (S_GET_VALUE (sym) == next->fr_address);
9245
9246 do
9247 {
9248 if (next->tc_frag_data.first_map != NULL)
9249 {
9250 /* Next frag starts with a mapping symbol. Discard this
9251 one. */
9252 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
9253 break;
9254 }
9255
9256 if (next->fr_next == NULL)
9257 {
9258 /* This mapping symbol is at the end of the section. Discard
9259 it. */
9260 know (next->fr_fix == 0 && next->fr_var == 0);
9261 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
9262 break;
9263 }
9264
9265 /* As long as we have empty frags without any mapping symbols,
9266 keep looking. */
9267 /* If the next frag is non-empty and does not start with a
9268 mapping symbol, then this mapping symbol is required. */
9269 if (next->fr_address != next->fr_next->fr_address)
9270 break;
9271
9272 next = next->fr_next;
9273 }
9274 while (next != NULL);
9275 }
9276 }
9277 #endif
9278
9279 /* Adjust the symbol table. */
9280
9281 void
9282 aarch64_adjust_symtab (void)
9283 {
9284 #ifdef OBJ_ELF
9285 /* Remove any overlapping mapping symbols generated by alignment frags. */
9286 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
9287 /* Now do generic ELF adjustments. */
9288 elf_adjust_symtab ();
9289 #endif
9290 }
9291
9292 static void
9293 checked_hash_insert (htab_t table, const char *key, void *value)
9294 {
9295 str_hash_insert (table, key, value, 0);
9296 }
9297
9298 static void
9299 sysreg_hash_insert (htab_t table, const char *key, void *value)
9300 {
9301 gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
9302 checked_hash_insert (table, key, value);
9303 }
9304
9305 static void
9306 fill_instruction_hash_table (void)
9307 {
9308 const aarch64_opcode *opcode = aarch64_opcode_table;
9309
9310 while (opcode->name != NULL)
9311 {
9312 templates *templ, *new_templ;
9313 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9314
9315 new_templ = XNEW (templates);
9316 new_templ->opcode = opcode;
9317 new_templ->next = NULL;
9318
9319 if (!templ)
9320 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9321 else
9322 {
9323 new_templ->next = templ->next;
9324 templ->next = new_templ;
9325 }
9326 ++opcode;
9327 }
9328 }
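/* Illustrative note (not part of the original source): opcode entries that
   share a mnemonic are chained into a single templates list under one hash
   key, so a lookup of, say, "add" returns the head of a chain that
   md_assemble walks until one entry parses and encodes successfully.  */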
9329
9330 static inline void
9331 convert_to_upper (char *dst, const char *src, size_t num)
9332 {
9333 unsigned int i;
9334 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9335 *dst = TOUPPER (*src);
9336 *dst = '\0';
9337 }
9338
9339 /* Assume STR points to a lower-case string; allocate, convert and return
9340 the corresponding upper-case string. */
9341 static inline const char*
9342 get_upper_str (const char *str)
9343 {
9344 char *ret;
9345 size_t len = strlen (str);
9346 ret = XNEWVEC (char, len + 1);
9347 convert_to_upper (ret, str, len);
9348 return ret;
9349 }
9350
9351 /* MD interface: Initialization. */
9352
9353 void
9354 md_begin (void)
9355 {
9356 unsigned mach;
9357 unsigned int i;
9358
9359 aarch64_ops_hsh = str_htab_create ();
9360 aarch64_cond_hsh = str_htab_create ();
9361 aarch64_shift_hsh = str_htab_create ();
9362 aarch64_sys_regs_hsh = str_htab_create ();
9363 aarch64_pstatefield_hsh = str_htab_create ();
9364 aarch64_sys_regs_ic_hsh = str_htab_create ();
9365 aarch64_sys_regs_dc_hsh = str_htab_create ();
9366 aarch64_sys_regs_at_hsh = str_htab_create ();
9367 aarch64_sys_regs_tlbi_hsh = str_htab_create ();
9368 aarch64_sys_regs_sr_hsh = str_htab_create ();
9369 aarch64_reg_hsh = str_htab_create ();
9370 aarch64_barrier_opt_hsh = str_htab_create ();
9371 aarch64_nzcv_hsh = str_htab_create ();
9372 aarch64_pldop_hsh = str_htab_create ();
9373 aarch64_hint_opt_hsh = str_htab_create ();
9374
9375 fill_instruction_hash_table ();
9376
9377 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
9378 sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
9379 (void *) (aarch64_sys_regs + i));
9380
9381 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
9382 sysreg_hash_insert (aarch64_pstatefield_hsh,
9383 aarch64_pstatefields[i].name,
9384 (void *) (aarch64_pstatefields + i));
9385
9386 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
9387 sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
9388 aarch64_sys_regs_ic[i].name,
9389 (void *) (aarch64_sys_regs_ic + i));
9390
9391 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
9392 sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
9393 aarch64_sys_regs_dc[i].name,
9394 (void *) (aarch64_sys_regs_dc + i));
9395
9396 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
9397 sysreg_hash_insert (aarch64_sys_regs_at_hsh,
9398 aarch64_sys_regs_at[i].name,
9399 (void *) (aarch64_sys_regs_at + i));
9400
9401 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
9402 sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
9403 aarch64_sys_regs_tlbi[i].name,
9404 (void *) (aarch64_sys_regs_tlbi + i));
9405
9406 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
9407 sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
9408 aarch64_sys_regs_sr[i].name,
9409 (void *) (aarch64_sys_regs_sr + i));
9410
9411 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
9412 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
9413 (void *) (reg_names + i));
9414
9415 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
9416 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
9417 (void *) (nzcv_names + i));
9418
9419 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
9420 {
9421 const char *name = aarch64_operand_modifiers[i].name;
9422 checked_hash_insert (aarch64_shift_hsh, name,
9423 (void *) (aarch64_operand_modifiers + i));
9424 /* Also hash the name in the upper case. */
9425 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
9426 (void *) (aarch64_operand_modifiers + i));
9427 }
9428
9429 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
9430 {
9431 unsigned int j;
9432 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
9433 the same condition code. */
9434 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
9435 {
9436 const char *name = aarch64_conds[i].names[j];
9437 if (name == NULL)
9438 break;
9439 checked_hash_insert (aarch64_cond_hsh, name,
9440 (void *) (aarch64_conds + i));
9441 /* Also hash the name in the upper case. */
9442 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
9443 (void *) (aarch64_conds + i));
9444 }
9445 }
9446
9447 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
9448 {
9449 const char *name = aarch64_barrier_options[i].name;
9450 /* Skip xx00 - the unallocated values of the barrier option. */
9451 if ((i & 0x3) == 0)
9452 continue;
9453 checked_hash_insert (aarch64_barrier_opt_hsh, name,
9454 (void *) (aarch64_barrier_options + i));
9455 /* Also hash the name in the upper case. */
9456 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
9457 (void *) (aarch64_barrier_options + i));
9458 }
9459
9460 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
9461 {
9462 const char *name = aarch64_barrier_dsb_nxs_options[i].name;
9463 checked_hash_insert (aarch64_barrier_opt_hsh, name,
9464 (void *) (aarch64_barrier_dsb_nxs_options + i));
9465 /* Also hash the name in the upper case. */
9466 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
9467 (void *) (aarch64_barrier_dsb_nxs_options + i));
9468 }
9469
9470 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
9471 {
9472 const char* name = aarch64_prfops[i].name;
9473 /* Skip the unallocated hint encodings. */
9474 if (name == NULL)
9475 continue;
9476 checked_hash_insert (aarch64_pldop_hsh, name,
9477 (void *) (aarch64_prfops + i));
9478 /* Also hash the name in the upper case. */
9479 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
9480 (void *) (aarch64_prfops + i));
9481 }
9482
9483 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
9484 {
9485 const char* name = aarch64_hint_options[i].name;
9486 const char* upper_name = get_upper_str(name);
9487
9488 checked_hash_insert (aarch64_hint_opt_hsh, name,
9489 (void *) (aarch64_hint_options + i));
9490
9491 /* Also hash the name in the upper case if not the same. */
9492 if (strcmp (name, upper_name) != 0)
9493 checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
9494 (void *) (aarch64_hint_options + i));
9495 }
9496
9497 /* Set the cpu variant based on the command-line options. */
9498 if (!mcpu_cpu_opt)
9499 mcpu_cpu_opt = march_cpu_opt;
9500
9501 if (!mcpu_cpu_opt)
9502 mcpu_cpu_opt = &cpu_default;
9503
9504 cpu_variant = *mcpu_cpu_opt;
9505
9506 /* Record the CPU type. */
9507 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
9508
9509 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
9510 }
9511
9512 /* Command line processing. */
9513
9514 const char *md_shortopts = "m:";
9515
9516 #ifdef AARCH64_BI_ENDIAN
9517 #define OPTION_EB (OPTION_MD_BASE + 0)
9518 #define OPTION_EL (OPTION_MD_BASE + 1)
9519 #else
9520 #if TARGET_BYTES_BIG_ENDIAN
9521 #define OPTION_EB (OPTION_MD_BASE + 0)
9522 #else
9523 #define OPTION_EL (OPTION_MD_BASE + 1)
9524 #endif
9525 #endif
9526
9527 struct option md_longopts[] = {
9528 #ifdef OPTION_EB
9529 {"EB", no_argument, NULL, OPTION_EB},
9530 #endif
9531 #ifdef OPTION_EL
9532 {"EL", no_argument, NULL, OPTION_EL},
9533 #endif
9534 {NULL, no_argument, NULL, 0}
9535 };
9536
9537 size_t md_longopts_size = sizeof (md_longopts);
9538
9539 struct aarch64_option_table
9540 {
9541 const char *option; /* Option name to match. */
9542 const char *help; /* Help information. */
9543 int *var; /* Variable to change. */
9544 int value; /* What to change it to. */
9545 char *deprecated; /* If non-null, print this message. */
9546 };
9547
9548 static struct aarch64_option_table aarch64_opts[] = {
9549 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
9550 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
9551 NULL},
9552 #ifdef DEBUG_AARCH64
9553 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
9554 #endif /* DEBUG_AARCH64 */
9555 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
9556 NULL},
9557 {"mno-verbose-error", N_("do not output verbose error messages"),
9558 &verbose_error_p, 0, NULL},
9559 {NULL, NULL, NULL, 0, NULL}
9560 };
9561
9562 struct aarch64_cpu_option_table
9563 {
9564 const char *name;
9565 const aarch64_feature_set value;
9566 /* The canonical name of the CPU, or NULL to use NAME converted to upper
9567 case. */
9568 const char *canonical_name;
9569 };
9570
9571 /* This list should, at a minimum, contain all the cpu names
9572 recognized by GCC. */
9573 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
9574 {"all", AARCH64_ANY, NULL},
9575 {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
9576 AARCH64_FEATURE_CRC), "Cortex-A34"},
9577 {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
9578 AARCH64_FEATURE_CRC), "Cortex-A35"},
9579 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
9580 AARCH64_FEATURE_CRC), "Cortex-A53"},
9581 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
9582 AARCH64_FEATURE_CRC), "Cortex-A57"},
9583 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
9584 AARCH64_FEATURE_CRC), "Cortex-A72"},
9585 {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
9586 AARCH64_FEATURE_CRC), "Cortex-A73"},
9587 {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9588 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9589 "Cortex-A55"},
9590 {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9591 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9592 "Cortex-A75"},
9593 {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9594 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9595 "Cortex-A76"},
9596 {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9597 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9598 | AARCH64_FEATURE_DOTPROD
9599 | AARCH64_FEATURE_SSBS),
9600 "Cortex-A76AE"},
9601 {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9602 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9603 | AARCH64_FEATURE_DOTPROD
9604 | AARCH64_FEATURE_SSBS),
9605 "Cortex-A77"},
9606 {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9607 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9608 | AARCH64_FEATURE_DOTPROD
9609 | AARCH64_FEATURE_SSBS),
9610 "Cortex-A65"},
9611 {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9612 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9613 | AARCH64_FEATURE_DOTPROD
9614 | AARCH64_FEATURE_SSBS),
9615 "Cortex-A65AE"},
9616 {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9617 AARCH64_FEATURE_F16
9618 | AARCH64_FEATURE_RCPC
9619 | AARCH64_FEATURE_DOTPROD
9620 | AARCH64_FEATURE_SSBS
9621 | AARCH64_FEATURE_PROFILE),
9622 "Cortex-A78"},
9623 {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9624 AARCH64_FEATURE_F16
9625 | AARCH64_FEATURE_RCPC
9626 | AARCH64_FEATURE_DOTPROD
9627 | AARCH64_FEATURE_SSBS
9628 | AARCH64_FEATURE_PROFILE),
9629 "Cortex-A78AE"},
9630 {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9631 AARCH64_FEATURE_DOTPROD
9632 | AARCH64_FEATURE_F16
9633 | AARCH64_FEATURE_FLAGM
9634 | AARCH64_FEATURE_PAC
9635 | AARCH64_FEATURE_PROFILE
9636 | AARCH64_FEATURE_RCPC
9637 | AARCH64_FEATURE_SSBS),
9638 "Cortex-A78C"},
9639 {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
9640 AARCH64_FEATURE_BFLOAT16
9641 | AARCH64_FEATURE_I8MM
9642 | AARCH64_FEATURE_MEMTAG
9643 | AARCH64_FEATURE_SVE2_BITPERM),
9644 "Cortex-A510"},
9645 {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
9646 AARCH64_FEATURE_BFLOAT16
9647 | AARCH64_FEATURE_I8MM
9648 | AARCH64_FEATURE_MEMTAG
9649 | AARCH64_FEATURE_SVE2_BITPERM),
9650 "Cortex-A710"},
9651 {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9652 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9653 | AARCH64_FEATURE_DOTPROD
9654 | AARCH64_FEATURE_PROFILE),
9655 "Ares"},
9656 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
9657 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
9658 "Samsung Exynos M1"},
9659 {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
9660 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
9661 | AARCH64_FEATURE_RDMA),
9662 "Qualcomm Falkor"},
9663 {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9664 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9665 | AARCH64_FEATURE_DOTPROD
9666 | AARCH64_FEATURE_SSBS),
9667 "Neoverse E1"},
9668 {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9669 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9670 | AARCH64_FEATURE_DOTPROD
9671 | AARCH64_FEATURE_PROFILE),
9672 "Neoverse N1"},
9673 {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
9674 AARCH64_FEATURE_BFLOAT16
9675 | AARCH64_FEATURE_I8MM
9676 | AARCH64_FEATURE_F16
9677 | AARCH64_FEATURE_SVE
9678 | AARCH64_FEATURE_SVE2
9679 | AARCH64_FEATURE_SVE2_BITPERM
9680 | AARCH64_FEATURE_MEMTAG
9681 | AARCH64_FEATURE_RNG),
9682 "Neoverse N2"},
9683 {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
9684 AARCH64_FEATURE_PROFILE
9685 | AARCH64_FEATURE_CVADP
9686 | AARCH64_FEATURE_SVE
9687 | AARCH64_FEATURE_SSBS
9688 | AARCH64_FEATURE_RNG
9689 | AARCH64_FEATURE_F16
9690 | AARCH64_FEATURE_BFLOAT16
9691 | AARCH64_FEATURE_I8MM), "Neoverse V1"},
9692 {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
9693 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
9694 | AARCH64_FEATURE_RDMA),
9695 "Qualcomm QDF24XX"},
9696 {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
9697 AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
9698 "Qualcomm Saphira"},
9699 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
9700 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
9701 "Cavium ThunderX"},
9702 {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
9703 AARCH64_FEATURE_CRYPTO),
9704 "Broadcom Vulcan"},
9705 /* The 'xgene-1' spelling is an older name for 'xgene1'; it was used
9706 in earlier releases and has been superseded by 'xgene1' in all
9707 tools. */
9708 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
9709 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
9710 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
9711 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
9712 {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
9713 {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9714 AARCH64_FEATURE_F16
9715 | AARCH64_FEATURE_RCPC
9716 | AARCH64_FEATURE_DOTPROD
9717 | AARCH64_FEATURE_SSBS
9718 | AARCH64_FEATURE_PROFILE),
9719 "Cortex-X1"},
9720 {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
9721 AARCH64_FEATURE_BFLOAT16
9722 | AARCH64_FEATURE_I8MM
9723 | AARCH64_FEATURE_MEMTAG
9724 | AARCH64_FEATURE_SVE2_BITPERM),
9725 "Cortex-X2"},
9726 {"generic", AARCH64_ARCH_V8, NULL},
9727
9728 {NULL, AARCH64_ARCH_NONE, NULL}
9729 };
9730
9731 struct aarch64_arch_option_table
9732 {
9733 const char *name;
9734 const aarch64_feature_set value;
9735 };
9736
9737 /* This list should, at a minimum, contain all the architecture names
9738 recognized by GCC. */
9739 static const struct aarch64_arch_option_table aarch64_archs[] = {
9740 {"all", AARCH64_ANY},
9741 {"armv8-a", AARCH64_ARCH_V8},
9742 {"armv8.1-a", AARCH64_ARCH_V8_1},
9743 {"armv8.2-a", AARCH64_ARCH_V8_2},
9744 {"armv8.3-a", AARCH64_ARCH_V8_3},
9745 {"armv8.4-a", AARCH64_ARCH_V8_4},
9746 {"armv8.5-a", AARCH64_ARCH_V8_5},
9747 {"armv8.6-a", AARCH64_ARCH_V8_6},
9748 {"armv8.7-a", AARCH64_ARCH_V8_7},
9749 {"armv8-r", AARCH64_ARCH_V8_R},
9750 {"armv9-a", AARCH64_ARCH_V9},
9751 {NULL, AARCH64_ARCH_NONE}
9752 };
9753
9754 /* ISA extensions. */
9755 struct aarch64_option_cpu_value_table
9756 {
9757 const char *name;
9758 const aarch64_feature_set value;
9759 const aarch64_feature_set require; /* Feature dependencies. */
9760 };
9761
9762 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
9763 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
9764 AARCH64_ARCH_NONE},
9765 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
9766 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
9767 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
9768 AARCH64_ARCH_NONE},
9769 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
9770 AARCH64_ARCH_NONE},
9771 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
9772 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
9773 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
9774 AARCH64_ARCH_NONE},
9775 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
9776 AARCH64_ARCH_NONE},
9777 {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
9778 AARCH64_ARCH_NONE},
9779 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
9780 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
9781 {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
9782 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
9783 {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
9784 AARCH64_FEATURE (AARCH64_FEATURE_FP
9785 | AARCH64_FEATURE_F16, 0)},
9786 {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
9787 AARCH64_ARCH_NONE},
9788 {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
9789 AARCH64_FEATURE (AARCH64_FEATURE_F16
9790 | AARCH64_FEATURE_SIMD
9791 | AARCH64_FEATURE_COMPNUM, 0)},
9792 {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
9793 AARCH64_ARCH_NONE},
9794 {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
9795 AARCH64_FEATURE (AARCH64_FEATURE_F16
9796 | AARCH64_FEATURE_SIMD, 0)},
9797 {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
9798 AARCH64_ARCH_NONE},
9799 {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
9800 AARCH64_ARCH_NONE},
9801 {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
9802 AARCH64_ARCH_NONE},
9803 {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
9804 AARCH64_ARCH_NONE},
9805 {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
9806 AARCH64_ARCH_NONE},
9807 {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
9808 AARCH64_ARCH_NONE},
9809 {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
9810 AARCH64_ARCH_NONE},
9811 {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
9812 AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
9813 {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
9814 AARCH64_ARCH_NONE},
9815 {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
9816 AARCH64_ARCH_NONE},
9817 {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
9818 AARCH64_ARCH_NONE},
9819 {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
9820 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9821 {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
9822 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9823 | AARCH64_FEATURE_SM4, 0)},
9824 {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
9825 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9826 | AARCH64_FEATURE_AES, 0)},
9827 {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
9828 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9829 | AARCH64_FEATURE_SHA3, 0)},
9830 {"sve2-bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
9831 AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
9832 {"sme", AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
9833 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9834 | AARCH64_FEATURE_BFLOAT16, 0)},
9835 {"sme-f64", AARCH64_FEATURE (AARCH64_FEATURE_SME_F64, 0),
9836 AARCH64_FEATURE (AARCH64_FEATURE_SME
9837 | AARCH64_FEATURE_SVE2
9838 | AARCH64_FEATURE_BFLOAT16, 0)},
9839 {"sme-i64", AARCH64_FEATURE (AARCH64_FEATURE_SME_I64, 0),
9840 AARCH64_FEATURE (AARCH64_FEATURE_SME
9841 | AARCH64_FEATURE_SVE2
9842 | AARCH64_FEATURE_BFLOAT16, 0)},
9843 {"bf16", AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
9844 AARCH64_ARCH_NONE},
9845 {"i8mm", AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
9846 AARCH64_ARCH_NONE},
9847 {"f32mm", AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
9848 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9849 {"f64mm", AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
9850 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9851 {"ls64", AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
9852 AARCH64_ARCH_NONE},
9853 {"flagm", AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
9854 AARCH64_ARCH_NONE},
9855 {"pauth", AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
9856 AARCH64_ARCH_NONE},
9857 {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
9858 };
9859
9860 struct aarch64_long_option_table
9861 {
9862 const char *option; /* Substring to match. */
9863 const char *help; /* Help information. */
9864 int (*func) (const char *subopt); /* Function to decode sub-option. */
9865 char *deprecated; /* If non-null, print this message. */
9866 };
9867
9868 /* Transitive closure of features depending on set. */
9869 static aarch64_feature_set
9870 aarch64_feature_disable_set (aarch64_feature_set set)
9871 {
9872 const struct aarch64_option_cpu_value_table *opt;
9873 aarch64_feature_set prev = 0;
9874
9875 while (prev != set) {
9876 prev = set;
9877 for (opt = aarch64_features; opt->name != NULL; opt++)
9878 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9879 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9880 }
9881 return set;
9882 }
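
/* A worked illustration of the loop above, using only entries from the
   aarch64_features table above: if SET initially contains just SVE, the
   fixpoint iteration folds in SVE2 (which requires SVE), then SVE2_AES,
   SVE2_SM4, SVE2_SHA3, SVE2_BITPERM and SME (which require SVE2), then
   SME_F64 and SME_I64 (which require SME), plus F32MM and F64MM (which
   require SVE) -- i.e. everything that must also be disabled when SVE
   is disabled.  */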
9883
9884 /* Transitive closure of dependencies of set. */
9885 static aarch64_feature_set
9886 aarch64_feature_enable_set (aarch64_feature_set set)
9887 {
9888 const struct aarch64_option_cpu_value_table *opt;
9889 aarch64_feature_set prev = 0;
9890
9891 while (prev != set) {
9892 prev = set;
9893 for (opt = aarch64_features; opt->name != NULL; opt++)
9894 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9895 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9896 }
9897 return set;
9898 }
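
/* A worked example using the same table: enabling "sve2-aes" requires
   SVE2 and AES; SVE2 requires SVE; SVE requires F16, SIMD and COMPNUM;
   COMPNUM requires F16 and SIMD; F16 and SIMD require FP.  The closure
   therefore ends up as {SVE2_AES, SVE2, AES, SVE, F16, SIMD, COMPNUM,
   FP}, which is what gets merged into the active feature set when the
   user asks for "+sve2-aes".  */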
9899
9900 static int
9901 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9902 bool ext_only)
9903 {
9904 /* We insist on extensions being added before being removed. We achieve
9905 this by using the ADDING_VALUE variable to indicate whether we are
9906 adding an extension (1) or removing it (0) and only allowing it to
9907 change in the order -1 -> 1 -> 0. */
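  /* For example, "+sve+nofp16" is accepted (the addition comes first),
     while "+nofp16+sve" is rejected with the "must specify extensions
     to add before specifying those to remove" diagnostic below.  Both
     names are taken from the aarch64_features table above; this is only
     an illustration of the ordering rule.  */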
9908 int adding_value = -1;
9909 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9910
9911 /* Copy the feature set, so that we can modify it. */
9912 *ext_set = **opt_p;
9913 *opt_p = ext_set;
9914
9915 while (str != NULL && *str != 0)
9916 {
9917 const struct aarch64_option_cpu_value_table *opt;
9918 const char *ext = NULL;
9919 int optlen;
9920
9921 if (!ext_only)
9922 {
9923 if (*str != '+')
9924 {
9925 as_bad (_("invalid architectural extension"));
9926 return 0;
9927 }
9928
9929 ext = strchr (++str, '+');
9930 }
9931
9932 if (ext != NULL)
9933 optlen = ext - str;
9934 else
9935 optlen = strlen (str);
9936
9937 if (optlen >= 2 && startswith (str, "no"))
9938 {
9939 if (adding_value != 0)
9940 adding_value = 0;
9941 optlen -= 2;
9942 str += 2;
9943 }
9944 else if (optlen > 0)
9945 {
9946 if (adding_value == -1)
9947 adding_value = 1;
9948 else if (adding_value != 1)
9949 {
9950 as_bad (_("must specify extensions to add before specifying "
9951 "those to remove"));
9952 return 0;
9953 }
9954 }
9955
9956 if (optlen == 0)
9957 {
9958 as_bad (_("missing architectural extension"));
9959 return 0;
9960 }
9961
9962 gas_assert (adding_value != -1);
9963
9964 for (opt = aarch64_features; opt->name != NULL; opt++)
9965 if (strncmp (opt->name, str, optlen) == 0)
9966 {
9967 aarch64_feature_set set;
9968
9969 /* Add or remove the extension. */
9970 if (adding_value)
9971 {
9972 set = aarch64_feature_enable_set (opt->value);
9973 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9974 }
9975 else
9976 {
9977 set = aarch64_feature_disable_set (opt->value);
9978 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9979 }
9980 break;
9981 }
9982
9983 if (opt->name == NULL)
9984 {
9985 as_bad (_("unknown architectural extension `%s'"), str);
9986 return 0;
9987 }
9988
9989 str = ext;
9990 }
9991
9992 return 1;
9993 }
9994
9995 static int
9996 aarch64_parse_cpu (const char *str)
9997 {
9998 const struct aarch64_cpu_option_table *opt;
9999 const char *ext = strchr (str, '+');
10000 size_t optlen;
10001
10002 if (ext != NULL)
10003 optlen = ext - str;
10004 else
10005 optlen = strlen (str);
10006
10007 if (optlen == 0)
10008 {
10009 as_bad (_("missing cpu name `%s'"), str);
10010 return 0;
10011 }
10012
10013 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10014 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10015 {
10016 mcpu_cpu_opt = &opt->value;
10017 if (ext != NULL)
10018 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10019
10020 return 1;
10021 }
10022
10023 as_bad (_("unknown cpu `%s'"), str);
10024 return 0;
10025 }
10026
10027 static int
10028 aarch64_parse_arch (const char *str)
10029 {
10030 const struct aarch64_arch_option_table *opt;
10031 const char *ext = strchr (str, '+');
10032 size_t optlen;
10033
10034 if (ext != NULL)
10035 optlen = ext - str;
10036 else
10037 optlen = strlen (str);
10038
10039 if (optlen == 0)
10040 {
10041 as_bad (_("missing architecture name `%s'"), str);
10042 return 0;
10043 }
10044
10045 for (opt = aarch64_archs; opt->name != NULL; opt++)
10046 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10047 {
10048 march_cpu_opt = &opt->value;
10049 if (ext != NULL)
10050 return aarch64_parse_features (ext, &march_cpu_opt, false);
10051
10052 return 1;
10053 }
10054
10055 as_bad (_("unknown architecture `%s'"), str);
10056 return 0;
10057 }
10058
10059 /* ABIs. */
10060 struct aarch64_option_abi_value_table
10061 {
10062 const char *name;
10063 enum aarch64_abi_type value;
10064 };
10065
10066 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
10067 {"ilp32", AARCH64_ABI_ILP32},
10068 {"lp64", AARCH64_ABI_LP64},
10069 };
10070
10071 static int
10072 aarch64_parse_abi (const char *str)
10073 {
10074 unsigned int i;
10075
10076 if (str[0] == '\0')
10077 {
10078 as_bad (_("missing abi name `%s'"), str);
10079 return 0;
10080 }
10081
10082 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10083 if (strcmp (str, aarch64_abis[i].name) == 0)
10084 {
10085 aarch64_abi = aarch64_abis[i].value;
10086 return 1;
10087 }
10088
10089 as_bad (_("unknown abi `%s'"), str);
10090 return 0;
10091 }
10092
10093 static struct aarch64_long_option_table aarch64_long_opts[] = {
10094 #ifdef OBJ_ELF
10095 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
10096 aarch64_parse_abi, NULL},
10097 #endif /* OBJ_ELF */
10098 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
10099 aarch64_parse_cpu, NULL},
10100 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
10101 aarch64_parse_arch, NULL},
10102 {NULL, NULL, 0, NULL}
10103 };
10104
10105 int
10106 md_parse_option (int c, const char *arg)
10107 {
10108 struct aarch64_option_table *opt;
10109 struct aarch64_long_option_table *lopt;
10110
10111 switch (c)
10112 {
10113 #ifdef OPTION_EB
10114 case OPTION_EB:
10115 target_big_endian = 1;
10116 break;
10117 #endif
10118
10119 #ifdef OPTION_EL
10120 case OPTION_EL:
10121 target_big_endian = 0;
10122 break;
10123 #endif
10124
10125 case 'a':
10126 /* Listing option. Just ignore these; we don't support additional
10127 ones. */
10128 return 0;
10129
10130 default:
10131 for (opt = aarch64_opts; opt->option != NULL; opt++)
10132 {
10133 if (c == opt->option[0]
10134 && ((arg == NULL && opt->option[1] == 0)
10135 || streq (arg, opt->option + 1)))
10136 {
10137 /* If the option is deprecated, tell the user. */
10138 if (opt->deprecated != NULL)
10139 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
10140 arg ? arg : "", _(opt->deprecated));
10141
10142 if (opt->var != NULL)
10143 *opt->var = opt->value;
10144
10145 return 1;
10146 }
10147 }
10148
10149 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10150 {
10151 /* These options are expected to have an argument. */
10152 if (c == lopt->option[0]
10153 && arg != NULL
10154 && startswith (arg, lopt->option + 1))
10155 {
10156 /* If the option is deprecated, tell the user. */
10157 if (lopt->deprecated != NULL)
10158 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
10159 _(lopt->deprecated));
10160
10161 /* Call the sub-option parser. */
10162 return lopt->func (arg + strlen (lopt->option) - 1);
10163 }
10164 }
10165
10166 return 0;
10167 }
10168
10169 return 1;
10170 }
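
/* A worked example of the long-option path above: for
   "-mcpu=cortex-a57+crc", getopt hands md_parse_option c == 'm' and
   arg == "cpu=cortex-a57+crc".  The "mcpu=" entry matches because ARG
   starts with "cpu=", so aarch64_parse_cpu is called with
   "cortex-a57+crc", and the trailing "+crc" is resolved through
   aarch64_parse_features.  (Names taken from the tables above.)  */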
10171
10172 void
10173 md_show_usage (FILE * fp)
10174 {
10175 struct aarch64_option_table *opt;
10176 struct aarch64_long_option_table *lopt;
10177
10178 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10179
10180 for (opt = aarch64_opts; opt->option != NULL; opt++)
10181 if (opt->help != NULL)
10182 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10183
10184 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10185 if (lopt->help != NULL)
10186 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10187
10188 #ifdef OPTION_EB
10189 fprintf (fp, _("\
10190 -EB assemble code for a big-endian cpu\n"));
10191 #endif
10192
10193 #ifdef OPTION_EL
10194 fprintf (fp, _("\
10195 -EL assemble code for a little-endian cpu\n"));
10196 #endif
10197 }
10198
10199 /* Parse a .cpu directive. */
10200
10201 static void
10202 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10203 {
10204 const struct aarch64_cpu_option_table *opt;
10205 char saved_char;
10206 char *name;
10207 char *ext;
10208 size_t optlen;
10209
10210 name = input_line_pointer;
10211 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10212 input_line_pointer++;
10213 saved_char = *input_line_pointer;
10214 *input_line_pointer = 0;
10215
10216 ext = strchr (name, '+');
10217
10218 if (ext != NULL)
10219 optlen = ext - name;
10220 else
10221 optlen = strlen (name);
10222
10223 /* Skip the first "all" entry. */
10224 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10225 if (strlen (opt->name) == optlen
10226 && strncmp (name, opt->name, optlen) == 0)
10227 {
10228 mcpu_cpu_opt = &opt->value;
10229 if (ext != NULL)
10230 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10231 return;
10232
10233 cpu_variant = *mcpu_cpu_opt;
10234
10235 *input_line_pointer = saved_char;
10236 demand_empty_rest_of_line ();
10237 return;
10238 }
10239 as_bad (_("unknown cpu `%s'"), name);
10240 *input_line_pointer = saved_char;
10241 ignore_rest_of_line ();
10242 }
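
/* Illustration of the directive handled above, using names from the
   tables in this file:

       .cpu cortex-a53+crc+crypto

   selects the Cortex-A53 feature set and then folds the "crc" and
   "crypto" extensions into it via aarch64_parse_features.  */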
10243
10244
10245 /* Parse a .arch directive. */
10246
10247 static void
10248 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10249 {
10250 const struct aarch64_arch_option_table *opt;
10251 char saved_char;
10252 char *name;
10253 char *ext;
10254 size_t optlen;
10255
10256 name = input_line_pointer;
10257 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10258 input_line_pointer++;
10259 saved_char = *input_line_pointer;
10260 *input_line_pointer = 0;
10261
10262 ext = strchr (name, '+');
10263
10264 if (ext != NULL)
10265 optlen = ext - name;
10266 else
10267 optlen = strlen (name);
10268
10269 /* Skip the first "all" entry. */
10270 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10271 if (strlen (opt->name) == optlen
10272 && strncmp (name, opt->name, optlen) == 0)
10273 {
10274 mcpu_cpu_opt = &opt->value;
10275 if (ext != NULL)
10276 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10277 return;
10278
10279 cpu_variant = *mcpu_cpu_opt;
10280
10281 *input_line_pointer = saved_char;
10282 demand_empty_rest_of_line ();
10283 return;
10284 }
10285
10286 as_bad (_("unknown architecture `%s'"), name);
10287 *input_line_pointer = saved_char;
10288 ignore_rest_of_line ();
10289 }
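
/* Illustration of the directive handled above:

       .arch armv8.2-a+sve

   switches subsequent code to the Armv8.2-A feature set plus the "sve"
   extension (together with everything SVE depends on, per
   aarch64_feature_enable_set).  */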
10290
10291 /* Parse a .arch_extension directive. */
10292
10293 static void
10294 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10295 {
10296 char saved_char;
10297 char *ext = input_line_pointer;
10298
10299 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10300 input_line_pointer++;
10301 saved_char = *input_line_pointer;
10302 *input_line_pointer = 0;
10303
10304 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10305 return;
10306
10307 cpu_variant = *mcpu_cpu_opt;
10308
10309 *input_line_pointer = saved_char;
10310 demand_empty_rest_of_line ();
10311 }
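
/* Illustration of the directive handled above: each use names a single
   extension, without the '+' prefix used on the command line, e.g.

       .arch_extension memtag
       .arch_extension nosimd

   which first enables "memtag" and then removes "simd" together with
   the features that require it (per aarch64_feature_disable_set).  */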
10312
10313 /* Copy symbol information. */
10314
10315 void
10316 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
10317 {
10318 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
10319 }
10320
10321 #ifdef OBJ_ELF
10322 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10323 This is needed so AArch64 specific st_other values can be independently
10324 specified for an IFUNC resolver (that is called by the dynamic linker)
10325 and the symbol it resolves (aliased to the resolver). In particular,
10326 if a function symbol has special st_other value set via directives,
10327 then attaching an IFUNC resolver to that symbol should not override
10328 the st_other setting. Requiring the directive on the IFUNC resolver
10329 symbol would be unexpected and problematic in C code, where the two
10330 symbols appear as two independent function declarations. */
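
/* A hypothetical illustration (the identifiers below are invented for
   the example): given

     void *resolve_foo (void);
     void foo (void) __attribute__ ((ifunc ("resolve_foo")));

   "foo" may carry an AArch64-specific st_other flag such as
   STO_AARCH64_VARIANT_PCS (set via the .variant_pcs directive) while
   "resolve_foo" follows the standard calling convention.  Copying
   st_other between the two symbols would silently apply the marking to
   both, which is what the function below avoids.  */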
10331
10332 void
10333 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10334 {
10335 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10336 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10337 if (srcelf->size)
10338 {
10339 if (destelf->size == NULL)
10340 destelf->size = XNEW (expressionS);
10341 *destelf->size = *srcelf->size;
10342 }
10343 else
10344 {
10345 free (destelf->size);
10346 destelf->size = NULL;
10347 }
10348 S_SET_SIZE (dest, S_GET_SIZE (src));
10349 }
10350 #endif