]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
gas: Port "copy st_size only if unset" to aarch64 and riscv
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2022 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
/* Element-type suffix accepted on a vector or SVE register,
   e.g. the "s" in "v0.4s", plus the SVE predication suffixes.  */
enum vector_el_type
{
  NT_invtype = -1,
  NT_b,
  NT_h,
  NT_s,
  NT_d,
  NT_q,
  NT_zero,	/* SVE "/z" (zeroing) predication suffix.  */
  NT_merge	/* SVE "/m" (merging) predication suffix.  */
};
101
102 /* SME horizontal or vertical slice indicator, encoded in "V".
103 Values:
104 0 - Horizontal
105 1 - vertical
106 */
107 enum sme_hv_slice
108 {
109 HV_horizontal = 0,
110 HV_vertical = 1
111 };
112
113 /* Bits for DEFINED field in vector_type_el. */
114 #define NTA_HASTYPE 1
115 #define NTA_HASINDEX 2
116 #define NTA_HASVARWIDTH 4
117
/* Shape/index information parsed from a vector register operand,
   e.g. the ".4s" and "[1]" parts of "v0.4s[1]".  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type (NT_*).  */
  unsigned char defined;	/* Mask of NTA_* bits that are valid.  */
  unsigned width;		/* Element count; 0 when variable/absent.  */
  int64_t index;		/* Element index, valid if NTA_HASINDEX.  */
};
125
126 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
127
/* Relocation/fixup information collected while parsing one instruction.  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD reloc code to emit.  */
  expressionS exp;			/* Expression the reloc applies to.  */
  int pc_rel;				/* Presumably non-zero for PC-relative
					   fixups — confirm at fixup emission.  */
  enum aarch64_opnd opnd;		/* Operand the reloc is attached to.  */
  uint32_t flags;			/* FIXUP_F_* flags (see above).  */
  unsigned need_libopcodes_p : 1;	/* NOTE(review): name suggests the fixup
					   must be re-encoded via libopcodes;
					   verify against md_apply_fix.  */
};
137
/* Everything GAS records about the instruction currently being
   assembled; a single static instance (INST, below) is reused for
   each assembly line.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
  {
    enum aarch64_operand_error_kind kind;
    const char *error;
  } parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};
155
156 typedef struct aarch64_instruction aarch64_instruction;
157
158 static aarch64_instruction inst;
159
160 static bool parse_operands (char *, const aarch64_opcode *);
161 static bool programmer_friendly_fixup (aarch64_instruction *);
162
163 /* Diagnostics inline function utilities.
164
165 These are lightweight utilities which should only be called by parse_operands
166 and other parsers. GAS processes each assembly line by parsing it against
167 instruction template(s), in the case of multiple templates (for the same
168 mnemonic name), those templates are tried one by one until one succeeds or
169 all fail. An assembly line may fail a few templates before being
170 successfully parsed; an error saved here in most cases is not a user error
171 but an error indicating the current template is not the right template.
172 Therefore it is very important that errors can be saved at a low cost during
173 the parsing; we don't want to slow down the whole parsing by recording
174 non-user errors in detail.
175
176 Remember that the objective is to help GAS pick up the most appropriate
177 error message in the case of multiple templates, e.g. FMOV which has 8
178 templates. */
179
180 static inline void
181 clear_error (void)
182 {
183 inst.parsing_error.kind = AARCH64_OPDE_NIL;
184 inst.parsing_error.error = NULL;
185 }
186
/* Return TRUE if a parsing diagnostic is currently recorded.  */
static inline bool
error_p (void)
{
  return inst.parsing_error.kind != AARCH64_OPDE_NIL;
}
192
/* Return the message of the recorded diagnostic; NULL means "use the
   default message for the operand" (see set_default_error).  */
static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}
198
/* Return the kind of the recorded diagnostic (AARCH64_OPDE_NIL when
   no error has been recorded).  */
static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}
204
/* Record a parsing diagnostic of kind KIND with message ERROR,
   unconditionally overwriting any earlier diagnostic.  ERROR may be
   NULL (see set_default_error).  */
static inline void
set_error (enum aarch64_operand_error_kind kind, const char *error)
{
  inst.parsing_error.kind = kind;
  inst.parsing_error.error = error;
}
211
/* Record ERROR as an AARCH64_OPDE_RECOVERABLE diagnostic.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
217
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message.  (A NULL message tells the reporting code to fall
   back on the operand description.)  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
225
/* Record ERROR as a syntax error, overwriting any earlier diagnostic.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
231
/* Record ERROR as a syntax error, but only when no diagnostic has been
   recorded yet — the earliest error is usually the most meaningful.  */
static inline void
set_first_syntax_error (const char *error)
{
  if (! error_p ())
    set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
238
/* Record ERROR as a fatal syntax error, unconditionally overwriting
   any earlier diagnostic.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
244 \f
245 /* Return value for certain parsers when the parsing fails; those parsers
246 return the information of the parsed result, e.g. register number, on
247 success. */
248 #define PARSE_FAIL -1
249
250 /* This is an invalid condition code that means no conditional field is
251 present. */
252 #define COND_ALWAYS 0x10
253
254 typedef struct
255 {
256 const char *template;
257 uint32_t value;
258 } asm_nzcv;
259
260 struct reloc_entry
261 {
262 char *name;
263 bfd_reloc_code_real_type reloc;
264 };
265
266 /* Macros to define the register types and masks for the purpose
267 of parsing. */
268
269 #undef AARCH64_REG_TYPES
270 #define AARCH64_REG_TYPES \
271 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
272 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
273 BASIC_REG_TYPE(SP_32) /* wsp */ \
274 BASIC_REG_TYPE(SP_64) /* sp */ \
275 BASIC_REG_TYPE(Z_32) /* wzr */ \
276 BASIC_REG_TYPE(Z_64) /* xzr */ \
277 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
278 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
279 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
280 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
281 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
282 BASIC_REG_TYPE(VN) /* v[0-31] */ \
283 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
284 BASIC_REG_TYPE(PN) /* p[0-15] */ \
285 BASIC_REG_TYPE(ZA) /* za[0-15] */ \
286 BASIC_REG_TYPE(ZAH) /* za[0-15]h */ \
287 BASIC_REG_TYPE(ZAV) /* za[0-15]v */ \
288 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
289 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
290 /* Typecheck: same, plus SVE registers. */ \
291 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
292 | REG_TYPE(ZN)) \
293 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
294 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
295 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
296 /* Typecheck: same, plus SVE registers. */ \
297 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
298 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
299 | REG_TYPE(ZN)) \
300 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
301 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
302 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
303 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
304 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
306 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
307 /* Typecheck: any [BHSDQ]P FP. */ \
308 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
309 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
310 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
311 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
313 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
314 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
315 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
316 be used for SVE instructions, since Zn and Pn are valid symbols \
317 in other contexts. */ \
318 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
320 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
321 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
322 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
323 | REG_TYPE(ZN) | REG_TYPE(PN)) \
324 /* Any integer register; used for error messages only. */ \
325 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
326 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
327 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
328 /* Pseudo type to mark the end of the enumerator sequence. */ \
329 BASIC_REG_TYPE(MAX)
330
331 #undef BASIC_REG_TYPE
332 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
333 #undef MULTI_REG_TYPE
334 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
335
336 /* Register type enumerators. */
337 typedef enum aarch64_reg_type_
338 {
339 /* A list of REG_TYPE_*. */
340 AARCH64_REG_TYPES
341 } aarch64_reg_type;
342
343 #undef BASIC_REG_TYPE
344 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
345 #undef REG_TYPE
346 #define REG_TYPE(T) (1 << REG_TYPE_##T)
347 #undef MULTI_REG_TYPE
348 #define MULTI_REG_TYPE(T,V) V,
349
350 /* Structure for a hash table entry for a register. */
351 typedef struct
352 {
353 const char *name;
354 unsigned char number;
355 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
356 unsigned char builtin;
357 } reg_entry;
358
359 /* Values indexed by aarch64_reg_type to assist the type checking. */
360 static const unsigned reg_type_masks[] =
361 {
362 AARCH64_REG_TYPES
363 };
364
365 #undef BASIC_REG_TYPE
366 #undef REG_TYPE
367 #undef MULTI_REG_TYPE
368 #undef AARCH64_REG_TYPES
369
/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be synchronized with the aarch64_reg_type definitions
   above.  */
373 static const char *
374 get_reg_expected_msg (aarch64_reg_type reg_type)
375 {
376 const char *msg;
377
378 switch (reg_type)
379 {
380 case REG_TYPE_R_32:
381 msg = N_("integer 32-bit register expected");
382 break;
383 case REG_TYPE_R_64:
384 msg = N_("integer 64-bit register expected");
385 break;
386 case REG_TYPE_R_N:
387 msg = N_("integer register expected");
388 break;
389 case REG_TYPE_R64_SP:
390 msg = N_("64-bit integer or SP register expected");
391 break;
392 case REG_TYPE_SVE_BASE:
393 msg = N_("base register expected");
394 break;
395 case REG_TYPE_R_Z:
396 msg = N_("integer or zero register expected");
397 break;
398 case REG_TYPE_SVE_OFFSET:
399 msg = N_("offset register expected");
400 break;
401 case REG_TYPE_R_SP:
402 msg = N_("integer or SP register expected");
403 break;
404 case REG_TYPE_R_Z_SP:
405 msg = N_("integer, zero or SP register expected");
406 break;
407 case REG_TYPE_FP_B:
408 msg = N_("8-bit SIMD scalar register expected");
409 break;
410 case REG_TYPE_FP_H:
411 msg = N_("16-bit SIMD scalar or floating-point half precision "
412 "register expected");
413 break;
414 case REG_TYPE_FP_S:
415 msg = N_("32-bit SIMD scalar or floating-point single precision "
416 "register expected");
417 break;
418 case REG_TYPE_FP_D:
419 msg = N_("64-bit SIMD scalar or floating-point double precision "
420 "register expected");
421 break;
422 case REG_TYPE_FP_Q:
423 msg = N_("128-bit SIMD scalar or floating-point quad precision "
424 "register expected");
425 break;
426 case REG_TYPE_R_Z_BHSDQ_V:
427 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
428 msg = N_("register expected");
429 break;
430 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
431 msg = N_("SIMD scalar or floating-point register expected");
432 break;
433 case REG_TYPE_VN: /* any V reg */
434 msg = N_("vector register expected");
435 break;
436 case REG_TYPE_ZN:
437 msg = N_("SVE vector register expected");
438 break;
439 case REG_TYPE_PN:
440 msg = N_("SVE predicate register expected");
441 break;
442 default:
443 as_fatal (_("invalid register type %d"), reg_type);
444 }
445 return msg;
446 }
447
448 /* Some well known registers that we refer to directly elsewhere. */
449 #define REG_SP 31
450 #define REG_ZR 31
451
452 /* Instructions take 4 bytes in the object file. */
453 #define INSN_SIZE 4
454
455 static htab_t aarch64_ops_hsh;
456 static htab_t aarch64_cond_hsh;
457 static htab_t aarch64_shift_hsh;
458 static htab_t aarch64_sys_regs_hsh;
459 static htab_t aarch64_pstatefield_hsh;
460 static htab_t aarch64_sys_regs_ic_hsh;
461 static htab_t aarch64_sys_regs_dc_hsh;
462 static htab_t aarch64_sys_regs_at_hsh;
463 static htab_t aarch64_sys_regs_tlbi_hsh;
464 static htab_t aarch64_sys_regs_sr_hsh;
465 static htab_t aarch64_reg_hsh;
466 static htab_t aarch64_barrier_opt_hsh;
467 static htab_t aarch64_nzcv_hsh;
468 static htab_t aarch64_pldop_hsh;
469 static htab_t aarch64_hint_opt_hsh;
470
471 /* Stuff needed to resolve the label ambiguity
472 As:
473 ...
474 label: <insn>
475 may differ from:
476 ...
477 label:
478 <insn> */
479
480 static symbolS *last_label_seen;
481
482 /* Literal pool structure. Held on a per-section
483 and per-sub-section basis. */
484
485 #define MAX_LITERAL_POOL_SIZE 1024
/* One entry in a literal pool: the expression to materialize.  */
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;
492
/* A literal pool, held per section and sub-section (see the comment
   above and LIST_OF_POOLS below).  */
typedef struct literal_pool
{
  literal_expression literals[MAX_LITERAL_POOL_SIZE];
  unsigned int next_free_entry;	/* Index of first unused LITERALS slot.  */
  unsigned int id;
  symbolS *symbol;		/* NOTE(review): presumably the label the
				   pool is addressed through — confirm.  */
  segT section;			/* Section this pool belongs to.  */
  subsegT sub_section;		/* Sub-section this pool belongs to.  */
  int size;
  struct literal_pool *next;	/* Next pool in the linked list.  */
} literal_pool;
504
505 /* Pointer to a linked list of literal pools. */
506 static literal_pool *list_of_pools = NULL;
507 \f
508 /* Pure syntax. */
509
510 /* This array holds the chars that always start a comment. If the
511 pre-processor is disabled, these aren't very useful. */
512 const char comment_chars[] = "";
513
514 /* This array holds the chars that only start a comment at the beginning of
515 a line. If the line seems to have the form '# 123 filename'
516 .line and .file directives will appear in the pre-processed output. */
517 /* Note that input_file.c hand checks for '#' at the beginning of the
518 first line of the input file. This is because the compiler outputs
519 #NO_APP at the beginning of its output. */
520 /* Also note that comments like this one will always work. */
521 const char line_comment_chars[] = "#";
522
523 const char line_separator_chars[] = ";";
524
525 /* Chars that can be used to separate mant
526 from exp in floating point numbers. */
527 const char EXP_CHARS[] = "eE";
528
529 /* Chars that mean this number is a floating point constant. */
530 /* As in 0f12.456 */
531 /* or 0d1.2345e12 */
532
533 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
534
535 /* Prefix character that indicates the start of an immediate value. */
536 #define is_immediate_prefix(C) ((C) == '#')
537
538 /* Separator character handling. */
539
540 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
541
/* If *STR points at character C, consume it and return TRUE;
   otherwise leave *STR untouched and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
553
554 #define skip_past_comma(str) skip_past_char (str, ',')
555
556 /* Arithmetic expressions (possibly involving symbols). */
557
558 static bool in_aarch64_get_expression = false;
559
560 /* Third argument to aarch64_get_expression. */
561 #define GE_NO_PREFIX false
562 #define GE_OPT_PREFIX true
563
564 /* Fourth argument to aarch64_get_expression. */
565 #define ALLOW_ABSENT false
566 #define REJECT_ABSENT true
567
568 /* Fifth argument to aarch64_get_expression. */
569 #define NORMAL_RESOLUTION false
570
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.
   If DEFER_RESOLUTION is true, then do not resolve expressions against
   constant symbols.  Necessary if the expression is part of a fixup
   that uses a reloc that must be emitted.  */
580
static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent,
			bool defer_resolution)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  /* Optionally consume a leading '#'; remember whether it was there so
     the error below can be made fatal (a '#' guarantees the operand was
     meant to be an immediate, so no other template can match).  */
  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* GAS's generic expression parser reads from input_line_pointer, so
     temporarily point it at *STR; the in_aarch64_get_expression flag
     lets md_operand flag bad expressions back to us via O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_aarch64_get_expression = true;
  if (defer_resolution)
    seg = deferred_expression (ep);
  else
    seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  /* Advance the caller's pointer past the consumed text and restore
     input_line_pointer for the rest of GAS.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
644
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* All AArch64 FP formats are IEEE; defer to the generic helper,
     honouring the target endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
655
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * exp)
{
  /* Only intervene while aarch64_get_expression is active; it turns
     the O_illegal marker into a proper syntax error.  */
  if (in_aarch64_get_expression)
    exp->X_op = O_illegal;
}
664
665 /* Immediate values. */
666
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful.  Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *error)
{
  /* Keep any earlier diagnostic; only record ERROR when none exists.  */
  if (! error_p ())
    set_syntax_error (error);
}
678
679 /* Similar to first_error, but this function accepts formatted error
680 message. */
681 static void
682 first_error_fmt (const char *format, ...)
683 {
684 va_list args;
685 enum
686 { size = 100 };
687 /* N.B. this single buffer will not cause error messages for different
688 instructions to pollute each other; this is because at the end of
689 processing of each assembly line, error message if any will be
690 collected by as_bad. */
691 static char buffer[size];
692
693 if (! error_p ())
694 {
695 int ret ATTRIBUTE_UNUSED;
696 va_start (args, format);
697 ret = vsnprintf (buffer, size, format, args);
698 know (ret <= size - 1 && ret >= 0);
699 va_end (args);
700 set_syntax_error (buffer);
701 }
702 }
703
704 /* Register parsing. */
705
706 /* Generic register parser which is called by other specialized
707 register parsers.
708 CCP points to what should be the beginning of a register name.
709 If it is indeed a valid register name, advance CCP over it and
710 return the reg_entry structure; otherwise return NULL.
711 It does not issue diagnostics. */
712
713 static reg_entry *
714 parse_reg (char **ccp)
715 {
716 char *start = *ccp;
717 char *p;
718 reg_entry *reg;
719
720 #ifdef REGISTER_PREFIX
721 if (*start != REGISTER_PREFIX)
722 return NULL;
723 start++;
724 #endif
725
726 p = start;
727 if (!ISALPHA (*p) || !is_name_beginner (*p))
728 return NULL;
729
730 do
731 p++;
732 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
733
734 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
735
736 if (!reg)
737 return NULL;
738
739 *ccp = p;
740 return reg;
741 }
742
/* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
   return FALSE.  (TYPE may be a MULTI_REG_TYPE mask covering several
   basic types; see reg_type_masks above.)  */
static bool
aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
{
  return (reg_type_masks[type] & (1 << reg->type)) != 0;
}
750
751 /* Try to parse a base or offset register. Allow SVE base and offset
752 registers if REG_TYPE includes SVE registers. Return the register
753 entry on success, setting *QUALIFIER to the register qualifier.
754 Return null otherwise.
755
756 Note that this function does not issue any diagnostics. */
757
758 static const reg_entry *
759 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
760 aarch64_opnd_qualifier_t *qualifier)
761 {
762 char *str = *ccp;
763 const reg_entry *reg = parse_reg (&str);
764
765 if (reg == NULL)
766 return NULL;
767
768 switch (reg->type)
769 {
770 case REG_TYPE_R_32:
771 case REG_TYPE_SP_32:
772 case REG_TYPE_Z_32:
773 *qualifier = AARCH64_OPND_QLF_W;
774 break;
775
776 case REG_TYPE_R_64:
777 case REG_TYPE_SP_64:
778 case REG_TYPE_Z_64:
779 *qualifier = AARCH64_OPND_QLF_X;
780 break;
781
782 case REG_TYPE_ZN:
783 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
784 || str[0] != '.')
785 return NULL;
786 switch (TOLOWER (str[1]))
787 {
788 case 's':
789 *qualifier = AARCH64_OPND_QLF_S_S;
790 break;
791 case 'd':
792 *qualifier = AARCH64_OPND_QLF_S_D;
793 break;
794 default:
795 return NULL;
796 }
797 str += 2;
798 break;
799
800 default:
801 return NULL;
802 }
803
804 *ccp = str;
805
806 return reg;
807 }
808
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept any 32/64-bit GPR, SP or ZR form — but no SVE registers.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
820
821 /* Parse the qualifier of a vector register or vector element of type
822 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
823 succeeds; otherwise return FALSE.
824
825 Accept only one occurrence of:
826 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
827 b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector/predicate registers take a bare element size ("z0.s");
     width 0 records that no element count was given.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only valid for SVE registers or the 1q form.  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* When an element count was given, the total must be a 64-bit or
     128-bit vector, or one of the short 2h / 4b forms.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return true;
}
906
907 /* *STR contains an SVE zero/merge predication suffix. Parse it into
908 *PARSED_TYPE and point *STR at the end of the suffix. */
909
910 static bool
911 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
912 {
913 char *ptr = *str;
914
915 /* Skip '/'. */
916 gas_assert (*ptr == '/');
917 ptr++;
918 switch (TOLOWER (*ptr))
919 {
920 case 'z':
921 parsed_type->type = NT_zero;
922 break;
923 case 'm':
924 parsed_type->type = NT_merge;
925 break;
926 default:
927 if (*ptr != '\0' && *ptr != ',')
928 first_error_fmt (_("unexpected character `%c' in predication type"),
929 *ptr);
930 else
931 first_error (_("missing predication type"));
932 return false;
933 }
934 parsed_type->width = 0;
935 *str = ptr + 1;
936 return true;
937 }
938
939 /* Parse a register of the type TYPE.
940
941 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
942 name or the parsed register is not of TYPE.
943
944 Otherwise return the register number, and optionally fill in the actual
945 type of the register in *RTYPE when multiple alternatives were given, and
946 return the register shape and element index information in *TYPEINFO.
947
948 IN_REG_LIST should be set with TRUE if the caller is parsing a register
949 list. */
950
static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bool in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start with an "undefined" shape; filled in below as suffixes are
     parsed.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the basic type actually parsed.  */
  type = reg->type;

  /* V/Z/P registers may take a ".<T>" shape suffix; P registers may
     alternatively take a "/z" or "/m" predication suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* An optional "[<index>]" element index.  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
			      NORMAL_RESOLUTION);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1075
1076 /* Parse register.
1077
1078 Return the register number on success; return PARSE_FAIL otherwise.
1079
1080 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1081 the register (e.g. NEON double or quad reg when either has been requested).
1082
1083 If this is a NEON vector register with additional type information, fill
1084 in the struct pointed to by VECTYPE (if non-NULL).
1085
1086 This parser does not handle register list. */
1087
1088 static int
1089 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1090 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1091 {
1092 struct vector_type_el atype;
1093 char *str = *ccp;
1094 int reg = parse_typed_reg (&str, type, rtype, &atype,
1095 /*in_reg_list= */ false);
1096
1097 if (reg == PARSE_FAIL)
1098 return PARSE_FAIL;
1099
1100 if (vectype)
1101 *vectype = atype;
1102
1103 *ccp = str;
1104
1105 return reg;
1106 }
1107
1108 static inline bool
1109 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1110 {
1111 return
1112 e1.type == e2.type
1113 && e1.defined == e2.defined
1114 && e1.width == e2.width && e1.index == e2.index;
1115 }
1116
1117 /* This function parses a list of vector registers of type TYPE.
1118 On success, it returns the parsed register list information in the
1119 following encoded format:
1120
1121 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1122 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1123
1124 The information of the register shape and/or index is returned in
1125 *VECTYPE.
1126
1127 It returns PARSE_FAIL if the register list is invalid.
1128
1129 The list contains one to four registers.
1130 Each register can be one of:
1131 <Vt>.<T>[<index>]
1132 <Vt>.<T>
1133 All <T> should be identical.
1134 All <index> should be identical.
1135 There are restrictions on <Vt> numbers which are checked later
1136 (by reg_list_valid_p). */
1137
static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      /* IN_RANGE is set at the bottom of the loop when a '-' follows the
	 previous register, i.e. we are parsing the end of a "Vm-Vn"
	 range; VAL_RANGE remembers the range's start register.  */
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ true);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      /* If one register has an index, every register must; the common
	 index is parsed after the closing '}' below.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Accumulate each register number into RET_VAL, 5 bits per
	 register, in list order (see the encoding description above).
	 A range contributes every register from VAL_RANGE to VAL.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* Parse the optional shared element index, e.g. "{v0.s, v1.s}[2]".  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
				  NORMAL_RESOLUTION);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  /* Advance the caller's pointer even on error so diagnostics point past
     the list; only report type info when the parse succeeded.  */
  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1278
1279 /* Directives: register aliases. */
1280
/* Insert an alias STR for register NUMBER of type TYPE into the alias
   hash table.  Return the new entry, or NULL if STR already names a
   register (warning unless the existing entry is an identical alias).  */

static reg_entry *
insert_reg_alias (char *str, int number, aarch64_reg_type type)
{
  reg_entry *new;
  const char *name;

  if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
    {
      if (new->builtin)
	as_warn (_("ignoring attempt to redefine built-in register '%s'"),
		 str);

      /* Only warn about a redefinition if it's not defined as the
	 same register.  */
      else if (new->number != number || new->type != type)
	as_warn (_("ignoring redefinition of register alias '%s'"), str);

      return NULL;
    }

  /* The hash table keeps a pointer to NAME, so make a durable copy.  */
  name = xstrdup (str);
  new = XNEW (reg_entry);

  new->name = name;
  new->number = number;
  new->type = type;
  new->builtin = false;

  str_hash_insert (aarch64_reg_hsh, name, new, 0);

  return new;
}
1313
1314 /* Look for the .req directive. This is of the form:
1315
1316 new_register_name .req existing_register_name
1317
1318 If we find one, or if it looks sufficiently like one that we want to
1319 handle any error here, return TRUE. Otherwise return FALSE. */
1320
static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Return TRUE: the statement looked like a .req, so it should not
	 be re-parsed as anything else.  */
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only attempt the upper-case variant when it differs from the
	 name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	     foo .req r0
	     Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1393
1394 /* Should never be called, as .req goes between the alias and the
1395 register name, not at the beginning of the line. */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reaching here means ".req" appeared at the start of a statement,
     which is always a syntax error (see comment above).  */
  as_bad (_("invalid syntax for .req directive"));
}
1401
1402 /* The .unreq directive deletes an alias which was previously defined
1403 by .req. For example:
1404
1405 my_alias .req r11
1406 .unreq my_alias */
1407
1408 static void
1409 s_unreq (int a ATTRIBUTE_UNUSED)
1410 {
1411 char *name;
1412 char saved_char;
1413
1414 name = input_line_pointer;
1415
1416 while (*input_line_pointer != 0
1417 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1418 ++input_line_pointer;
1419
1420 saved_char = *input_line_pointer;
1421 *input_line_pointer = 0;
1422
1423 if (!*name)
1424 as_bad (_("invalid syntax for .unreq directive"));
1425 else
1426 {
1427 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1428
1429 if (!reg)
1430 as_bad (_("unknown register alias '%s'"), name);
1431 else if (reg->builtin)
1432 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1433 name);
1434 else
1435 {
1436 char *p;
1437 char *nbuf;
1438
1439 str_hash_delete (aarch64_reg_hsh, name);
1440 free ((char *) reg->name);
1441 free (reg);
1442
1443 /* Also locate the all upper case and all lower case versions.
1444 Do not complain if we cannot find one or the other as it
1445 was probably deleted above. */
1446
1447 nbuf = strdup (name);
1448 for (p = nbuf; *p; p++)
1449 *p = TOUPPER (*p);
1450 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1451 if (reg)
1452 {
1453 str_hash_delete (aarch64_reg_hsh, nbuf);
1454 free ((char *) reg->name);
1455 free (reg);
1456 }
1457
1458 for (p = nbuf; *p; p++)
1459 *p = TOLOWER (*p);
1460 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1461 if (reg)
1462 {
1463 str_hash_delete (aarch64_reg_hsh, nbuf);
1464 free ((char *) reg->name);
1465 free (reg);
1466 }
1467
1468 free (nbuf);
1469 }
1470 }
1471
1472 *input_line_pointer = saved_char;
1473 demand_empty_rest_of_line ();
1474 }
1475
1476 /* Directives: Instruction set selection. */
1477
1478 #ifdef OBJ_ELF
1479 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1480 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1481 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1482 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1483
1484 /* Create a new mapping symbol for the transition to STATE. */
1485
static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Pick the AAELF64 mapping symbol name for STATE; both are untyped
     (see the comment above this group of functions).  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag must be in non-decreasing address
	 order; an equal address means the old symbol is superseded.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1539
1540 /* We must sometimes convert a region marked as code to data during
1541 code alignment, if an odd number of bytes have to be padded. The
1542 code mapping symbol is pushed to an aligned address. */
1543
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* At offset 0 the removed symbol must also have been the frag's
	 first mapping symbol; clear that record too.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the BYTES of padding as data, then resume STATE after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1567
1568 static void mapping_state_2 (enum mstate state, int max_chars);
1569
1570 /* Set the mapping state to STATE. Only call this when about to
1571 emit some STATE bytes to the file. */
1572
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* The preceding bytes (if any) were never marked, so label them
	 as data before emitting the instruction-state symbol below.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1611
1612 /* Same as mapping_state, but MAX_CHARS bytes have already been
1613 allocated. Put the mapping symbol that far back. */
1614
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* No mapping symbols in absolute or other abnormal sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* Place the symbol MAX_CHARS back from the current frag position,
     i.e. at the start of the already-allocated bytes.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
1631 #else
1632 #define mapping_state(x) /* nothing */
1633 #define mapping_state_2(x, y) /* nothing */
1634 #endif
1635
1636 /* Directives: sectioning and alignment. */
1637
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1647
/* Handle the .even directive: align the current location to a 2-byte
   boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1659
1660 /* Directives: Literal pools. */
1661
1662 static literal_pool *
1663 find_literal_pool (int size)
1664 {
1665 literal_pool *pool;
1666
1667 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1668 {
1669 if (pool->section == now_seg
1670 && pool->sub_section == now_subseg && pool->size == size)
1671 break;
1672 }
1673
1674 return pool;
1675 }
1676
static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = XNEW (literal_pool);
      /* NOTE(review): XNEW presumably aborts on allocation failure, which
	 would make this NULL check dead code -- confirm before removing.  */
      if (!pool)
	return NULL;

      /* Currently we always put the literal pool in the current text
	 section.  If we were generating "small" model code where we
	 knew that all code and initialised data was within 1MB then
	 we could output literals to mergeable, read-only data
	 sections.  */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  Give them a
     fresh anchor symbol and a new ID; s_ltorg locates the symbol when
     the pool is dumped.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    &zero_address_frag, 0);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}
1721
1722 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1723 Return TRUE on success, otherwise return FALSE. */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool; identical
     constants and identical symbol references are shared.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite the caller's expression into a reference to the pool entry:
     pool-anchor symbol plus the entry's byte offset within the pool.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1781
1782 /* Can't use symbol_new here, so have to create a symbol and then at
1783 a later date assign it a value. That's what these functions do. */
1784
static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns durable
     storage independent of the caller's buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1832
1833
/* Handle the .ltorg/.pool directives: dump every non-empty literal pool
   (of each supported entry size) at the current location.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Entry sizes of 4, 8 and 16 bytes (align = 2..4).  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 in the name makes the label impossible to write in
	 source, avoiding clashes with user symbols.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1892
1893 #ifdef OBJ_ELF
1894 /* Forward declarations for functions below, in the MD interface
1895 section. */
1896 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1897 static struct reloc_table_entry * find_reloc_table_entry (char **);
1898
1899 /* Directives: Data. */
1900 /* N.B. the support for relocation suffix in this directive needs to be
1901 implemented properly. */
1902
static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional relocation-specifier suffix of the
	     form "#:name:"; none are actually supported here yet (see
	     the comment above this function).  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1954
1955 /* Mark symbol that it follows a variant PCS convention. */
1956
static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();
  /* Set the ELF st_other bit that marks the symbol as following a
     variant procedure-call standard.  */
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
1977 #endif /* OBJ_ELF */
1978
1979 /* Output a 32-bit word, but mark as an instruction. */
1980
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instructions are always little-endian; byte-swap the word when
	 assembling for a big-endian target.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Record the emitted words as instructions for line-debug purposes.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2035
/* Handle the .cfi_b_key_frame directive: mark the current CFI FDE as
   using the B pointer-authentication key for return addresses.  */

static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2043
2044 #ifdef OBJ_ELF
2045 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2046
static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the instruction that immediately
     follows this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2059
2060 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2061
static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2079
2080 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2081
static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the instruction that immediately
     follows this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2094 #endif /* OBJ_ELF */
2095
2096 static void s_aarch64_arch (int);
2097 static void s_aarch64_cpu (int);
2098 static void s_aarch64_arch_extension (int);
2099
2100 /* This table describes all the machine specific pseudo-ops the assembler
2101 has to support. The fields are:
2102 pseudo-op name without dot
2103 function to call to execute this pseudo-op
2104 Integer arg to pass to the function. */
2105
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* .pool is an alias for .ltorg.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The integer argument is the data size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* The character argument selects the float format for float_cons.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2133 \f
2134
2135 /* Check whether STR points to a register name followed by a comma or the
2136 end of line; REG_TYPE indicates which register types are checked
2137 against. Return TRUE if STR is such a register name; otherwise return
2138 FALSE. The function does not intend to produce any diagnostics, but since
2139 the register parser aarch64_reg_parse, which is called by this function,
2140 does produce diagnostics, we call clear_error to clear any diagnostics
2141 that may be generated by aarch64_reg_parse.
2142 Also, the function returns FALSE directly if there is any user error
2143 present at the function entry. This prevents the existing diagnostics
2144 state from being spoiled.
2145 The function currently serves parse_constant_immediate and
2146 parse_big_immediate only. */
2147 static bool
2148 reg_name_p (char *str, aarch64_reg_type reg_type)
2149 {
2150 int reg;
2151
2152 /* Prevent the diagnostics state from being spoiled. */
2153 if (error_p ())
2154 return false;
2155
2156 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2157
2158 /* Clear the parsing error that may be set by the reg parser. */
2159 clear_error ();
2160
2161 if (reg == PARSE_FAIL)
2162 return false;
2163
2164 skip_whitespace (str);
2165 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2166 return true;
2167
2168 return false;
2169 }
2170
2171 /* Parser functions used exclusively in instruction operands. */
2172
2173 /* Parse an immediate expression which may not be constant.
2174
2175 To prevent the expression parser from pushing a register name
2176 into the symbol table as an undefined symbol, firstly a check is
2177 done to find out whether STR is a register of type REG_TYPE followed
2178 by a comma or the end of line. Return FALSE if STR is such a string. */
2179
static bool
parse_immediate_expression (char **str, expressionS *exp,
			    aarch64_reg_type reg_type)
{
  /* Reject a plain register name first, so the expression parser does
     not enter it into the symbol table as an undefined symbol.  */
  if (reg_name_p (*str, reg_type))
    {
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT,
			  NORMAL_RESOLUTION);

  if (exp->X_op == O_absent)
    {
      set_fatal_syntax_error (_("missing immediate expression"));
      return false;
    }

  return true;
}
2201
2202 /* Constant immediate-value read function for use in insn parsing.
2203 STR points to the beginning of the immediate (with the optional
2204 leading #); *VAL receives the value. REG_TYPE says which register
2205 names should be treated as registers rather than as symbolic immediates.
2206
2207 Return TRUE on success; otherwise return FALSE. */
2208
2209 static bool
2210 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2211 {
2212 expressionS exp;
2213
2214 if (! parse_immediate_expression (str, &exp, reg_type))
2215 return false;
2216
2217 if (exp.X_op != O_constant)
2218 {
2219 set_syntax_error (_("constant expression required"));
2220 return false;
2221 }
2222
2223 *val = exp.X_add_number;
2224 return true;
2225 }
2226
/* Compress the single-precision bit pattern IMM into the 8-bit AArch64
   floating-point immediate encoding: bits [25:19] move to [6:0] and the
   sign bit [31] moves to [7].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t mantissa_exp = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0] */
  uint32_t sign = (imm >> (31 - 7)) & 0x80;	/* b[31] -> b[7] */

  return mantissa_exp | sign;
}
2233
2234 /* Return TRUE if the single-precision floating-point value encoded in IMM
2235 can be expressed in the AArch64 8-bit signed floating-point format with
2236 3-bit exponent and normalized 4 bits of precision; in other words, the
2237 floating-point value must be expressable as
2238 (+/-) n / 16 * power (2, r)
2239 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2240
static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* An encodable single-precision value has the bit pattern

       3 32222222 2221111111111
       1 09876543 21098765432109876543210
       n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are 0 or 1 independently, and E == ~e.  */

  /* The low 19 bits of the significand must be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 29..25 must all equal the complement of bit 30.  */
  uint32_t pattern = (((imm >> 30) & 0x1) == 0) ? 0x3e000000 : 0x40000000;

  return (imm & 0x7e000000) == pattern;
}
2266
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the single
   encoding in *FPWORD if so; leave it untouched otherwise.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* A convertible double has the bit pattern:

       6 66655555555 5544 44444444 33333333 33222222 22221111 111111
       3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
       n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */
  uint32_t top = imm >> 32;
  uint32_t bottom = (uint32_t) imm;
  uint32_t want;

  /* The 29 fraction bits discarded by the narrowing must be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* Bits 59-61 ("~~~") must be the inverse of bit 62 ("E").  */
  want = ((top >> 30) & 0x1) == 0 ? 0x38000000 : 0x40000000;
  if ((top & 0x78000000) != want)
    return false;

  /* Reject exponents that do not fit the single format
     (Eeee_eeee == 1111_1111).  */
  if ((top & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (top & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	    | ((top << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	    | (bottom >> 29);		/* 3 S bits.  */
  return true;
}
2314
2315 /* Return true if we should treat OPERAND as a double-precision
2316 floating-point operand rather than a single-precision one. */
2317 static bool
2318 double_precision_operand_p (const aarch64_opnd_info *operand)
2319 {
2320 /* Check for unsuffixed SVE registers, which are allowed
2321 for LDR and STR but not in instructions that require an
2322 immediate. We get better error messages if we arbitrarily
2323 pick one size, parse the immediate normally, and then
2324 report the match failure in the normal way. */
2325 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2326 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2327 }
2328
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* The leading '#' on the immediate is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit encoding to a 32-bit one; fail if any
	     precision would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* Too wide to be a single-precision bit pattern.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name where an immediate is required: recoverable, so
	 the caller may try other operand interpretations.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal notation: atof_ieee produces the single-precision
	 encoding in LITTLENUM-sized chunks.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2404
2405 /* Less-generic immediate-value read function with the possibility of loading
2406 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2407 instructions.
2408
2409 To prevent the expression parser from pushing a register name into the
2410 symbol table as an undefined symbol, a check is firstly done to find
2411 out whether STR is a register of type REG_TYPE followed by a comma or
2412 the end of line. Return FALSE if STR is such a register. */
2413
2414 static bool
2415 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2416 {
2417 char *ptr = *str;
2418
2419 if (reg_name_p (ptr, reg_type))
2420 {
2421 set_syntax_error (_("immediate operand required"));
2422 return false;
2423 }
2424
2425 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT,
2426 NORMAL_RESOLUTION);
2427
2428 if (inst.reloc.exp.X_op == O_constant)
2429 *imm = inst.reloc.exp.X_add_number;
2430
2431 *str = ptr;
2432
2433 return true;
2434 }
2435
2436 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2437 if NEED_LIBOPCODES is non-zero, the fixup will need
2438 assistance from the libopcodes. */
2439
2440 static inline void
2441 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2442 const aarch64_opnd_info *operand,
2443 int need_libopcodes_p)
2444 {
2445 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2446 reloc->opnd = operand->type;
2447 if (need_libopcodes_p)
2448 reloc->need_libopcodes_p = 1;
2449 };
2450
2451 /* Return TRUE if the instruction needs to be fixed up later internally by
2452 the GAS; otherwise return FALSE. */
2453
2454 static inline bool
2455 aarch64_gas_internal_fixup_p (void)
2456 {
2457 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2458 }
2459
2460 /* Assign the immediate value to the relevant field in *OPERAND if
2461 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2462 needs an internal fixup in a later stage.
2463 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2464 IMM.VALUE that may get assigned with the constant. */
2465 static inline void
2466 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2467 aarch64_opnd_info *operand,
2468 int addr_off_p,
2469 int need_libopcodes_p,
2470 int skip_p)
2471 {
2472 if (reloc->exp.X_op == O_constant)
2473 {
2474 if (addr_off_p)
2475 operand->addr.offset.imm = reloc->exp.X_add_number;
2476 else
2477 operand->imm.value = reloc->exp.X_add_number;
2478 reloc->type = BFD_RELOC_UNUSED;
2479 }
2480 else
2481 {
2482 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2483 /* Tell libopcodes to ignore this operand or not. This is helpful
2484 when one of the operands needs to be fixed up later but we need
2485 libopcodes to check the other operands. */
2486 operand->skip = skip_p;
2487 }
2488 }
2489
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  /* Modifier name as written in assembler source, without the colons.  */
  const char *name;
  /* Non-zero when the resulting relocation is PC-relative.  */
  int pc_rel;
  /* BFD relocation code to use for each kind of instruction the modifier
     may be attached to; 0 means the modifier is not permitted there.  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};
2509
2510 static struct reloc_table_entry reloc_table[] =
2511 {
2512 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2513 {"lo12", 0,
2514 0, /* adr_type */
2515 0,
2516 0,
2517 BFD_RELOC_AARCH64_ADD_LO12,
2518 BFD_RELOC_AARCH64_LDST_LO12,
2519 0},
2520
2521 /* Higher 21 bits of pc-relative page offset: ADRP */
2522 {"pg_hi21", 1,
2523 0, /* adr_type */
2524 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2525 0,
2526 0,
2527 0,
2528 0},
2529
2530 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2531 {"pg_hi21_nc", 1,
2532 0, /* adr_type */
2533 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2534 0,
2535 0,
2536 0,
2537 0},
2538
2539 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2540 {"abs_g0", 0,
2541 0, /* adr_type */
2542 0,
2543 BFD_RELOC_AARCH64_MOVW_G0,
2544 0,
2545 0,
2546 0},
2547
2548 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2549 {"abs_g0_s", 0,
2550 0, /* adr_type */
2551 0,
2552 BFD_RELOC_AARCH64_MOVW_G0_S,
2553 0,
2554 0,
2555 0},
2556
2557 /* Less significant bits 0-15 of address/value: MOVK, no check */
2558 {"abs_g0_nc", 0,
2559 0, /* adr_type */
2560 0,
2561 BFD_RELOC_AARCH64_MOVW_G0_NC,
2562 0,
2563 0,
2564 0},
2565
2566 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2567 {"abs_g1", 0,
2568 0, /* adr_type */
2569 0,
2570 BFD_RELOC_AARCH64_MOVW_G1,
2571 0,
2572 0,
2573 0},
2574
2575 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2576 {"abs_g1_s", 0,
2577 0, /* adr_type */
2578 0,
2579 BFD_RELOC_AARCH64_MOVW_G1_S,
2580 0,
2581 0,
2582 0},
2583
2584 /* Less significant bits 16-31 of address/value: MOVK, no check */
2585 {"abs_g1_nc", 0,
2586 0, /* adr_type */
2587 0,
2588 BFD_RELOC_AARCH64_MOVW_G1_NC,
2589 0,
2590 0,
2591 0},
2592
2593 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2594 {"abs_g2", 0,
2595 0, /* adr_type */
2596 0,
2597 BFD_RELOC_AARCH64_MOVW_G2,
2598 0,
2599 0,
2600 0},
2601
2602 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2603 {"abs_g2_s", 0,
2604 0, /* adr_type */
2605 0,
2606 BFD_RELOC_AARCH64_MOVW_G2_S,
2607 0,
2608 0,
2609 0},
2610
2611 /* Less significant bits 32-47 of address/value: MOVK, no check */
2612 {"abs_g2_nc", 0,
2613 0, /* adr_type */
2614 0,
2615 BFD_RELOC_AARCH64_MOVW_G2_NC,
2616 0,
2617 0,
2618 0},
2619
2620 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2621 {"abs_g3", 0,
2622 0, /* adr_type */
2623 0,
2624 BFD_RELOC_AARCH64_MOVW_G3,
2625 0,
2626 0,
2627 0},
2628
2629 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2630 {"prel_g0", 1,
2631 0, /* adr_type */
2632 0,
2633 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2634 0,
2635 0,
2636 0},
2637
2638 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2639 {"prel_g0_nc", 1,
2640 0, /* adr_type */
2641 0,
2642 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2643 0,
2644 0,
2645 0},
2646
2647 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2648 {"prel_g1", 1,
2649 0, /* adr_type */
2650 0,
2651 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2652 0,
2653 0,
2654 0},
2655
2656 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2657 {"prel_g1_nc", 1,
2658 0, /* adr_type */
2659 0,
2660 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2661 0,
2662 0,
2663 0},
2664
2665 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2666 {"prel_g2", 1,
2667 0, /* adr_type */
2668 0,
2669 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2670 0,
2671 0,
2672 0},
2673
2674 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2675 {"prel_g2_nc", 1,
2676 0, /* adr_type */
2677 0,
2678 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2679 0,
2680 0,
2681 0},
2682
2683 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2684 {"prel_g3", 1,
2685 0, /* adr_type */
2686 0,
2687 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2688 0,
2689 0,
2690 0},
2691
2692 /* Get to the page containing GOT entry for a symbol. */
2693 {"got", 1,
2694 0, /* adr_type */
2695 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2696 0,
2697 0,
2698 0,
2699 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2700
2701 /* 12 bit offset into the page containing GOT entry for that symbol. */
2702 {"got_lo12", 0,
2703 0, /* adr_type */
2704 0,
2705 0,
2706 0,
2707 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2708 0},
2709
2710 /* 0-15 bits of address/value: MOVk, no check. */
2711 {"gotoff_g0_nc", 0,
2712 0, /* adr_type */
2713 0,
2714 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2715 0,
2716 0,
2717 0},
2718
2719 /* Most significant bits 16-31 of address/value: MOVZ. */
2720 {"gotoff_g1", 0,
2721 0, /* adr_type */
2722 0,
2723 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2724 0,
2725 0,
2726 0},
2727
2728 /* 15 bit offset into the page containing GOT entry for that symbol. */
2729 {"gotoff_lo15", 0,
2730 0, /* adr_type */
2731 0,
2732 0,
2733 0,
2734 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2735 0},
2736
2737 /* Get to the page containing GOT TLS entry for a symbol */
2738 {"gottprel_g0_nc", 0,
2739 0, /* adr_type */
2740 0,
2741 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2742 0,
2743 0,
2744 0},
2745
2746 /* Get to the page containing GOT TLS entry for a symbol */
2747 {"gottprel_g1", 0,
2748 0, /* adr_type */
2749 0,
2750 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2751 0,
2752 0,
2753 0},
2754
2755 /* Get to the page containing GOT TLS entry for a symbol */
2756 {"tlsgd", 0,
2757 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2758 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2759 0,
2760 0,
2761 0,
2762 0},
2763
2764 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2765 {"tlsgd_lo12", 0,
2766 0, /* adr_type */
2767 0,
2768 0,
2769 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2770 0,
2771 0},
2772
2773 /* Lower 16 bits address/value: MOVk. */
2774 {"tlsgd_g0_nc", 0,
2775 0, /* adr_type */
2776 0,
2777 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2778 0,
2779 0,
2780 0},
2781
2782 /* Most significant bits 16-31 of address/value: MOVZ. */
2783 {"tlsgd_g1", 0,
2784 0, /* adr_type */
2785 0,
2786 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2787 0,
2788 0,
2789 0},
2790
2791 /* Get to the page containing GOT TLS entry for a symbol */
2792 {"tlsdesc", 0,
2793 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2794 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2795 0,
2796 0,
2797 0,
2798 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2799
2800 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2801 {"tlsdesc_lo12", 0,
2802 0, /* adr_type */
2803 0,
2804 0,
2805 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2806 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2807 0},
2808
2809 /* Get to the page containing GOT TLS entry for a symbol.
2810 The same as GD, we allocate two consecutive GOT slots
2811 for module index and module offset, the only difference
2812 with GD is the module offset should be initialized to
2813 zero without any outstanding runtime relocation. */
2814 {"tlsldm", 0,
2815 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2816 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2817 0,
2818 0,
2819 0,
2820 0},
2821
2822 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2823 {"tlsldm_lo12_nc", 0,
2824 0, /* adr_type */
2825 0,
2826 0,
2827 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2828 0,
2829 0},
2830
2831 /* 12 bit offset into the module TLS base address. */
2832 {"dtprel_lo12", 0,
2833 0, /* adr_type */
2834 0,
2835 0,
2836 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2837 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2838 0},
2839
2840 /* Same as dtprel_lo12, no overflow check. */
2841 {"dtprel_lo12_nc", 0,
2842 0, /* adr_type */
2843 0,
2844 0,
2845 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2846 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2847 0},
2848
2849 /* bits[23:12] of offset to the module TLS base address. */
2850 {"dtprel_hi12", 0,
2851 0, /* adr_type */
2852 0,
2853 0,
2854 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2855 0,
2856 0},
2857
2858 /* bits[15:0] of offset to the module TLS base address. */
2859 {"dtprel_g0", 0,
2860 0, /* adr_type */
2861 0,
2862 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2863 0,
2864 0,
2865 0},
2866
2867 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2868 {"dtprel_g0_nc", 0,
2869 0, /* adr_type */
2870 0,
2871 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2872 0,
2873 0,
2874 0},
2875
2876 /* bits[31:16] of offset to the module TLS base address. */
2877 {"dtprel_g1", 0,
2878 0, /* adr_type */
2879 0,
2880 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2881 0,
2882 0,
2883 0},
2884
2885 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2886 {"dtprel_g1_nc", 0,
2887 0, /* adr_type */
2888 0,
2889 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2890 0,
2891 0,
2892 0},
2893
2894 /* bits[47:32] of offset to the module TLS base address. */
2895 {"dtprel_g2", 0,
2896 0, /* adr_type */
2897 0,
2898 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2899 0,
2900 0,
2901 0},
2902
2903 /* Lower 16 bit offset into GOT entry for a symbol */
2904 {"tlsdesc_off_g0_nc", 0,
2905 0, /* adr_type */
2906 0,
2907 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2908 0,
2909 0,
2910 0},
2911
2912 /* Higher 16 bit offset into GOT entry for a symbol */
2913 {"tlsdesc_off_g1", 0,
2914 0, /* adr_type */
2915 0,
2916 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2917 0,
2918 0,
2919 0},
2920
2921 /* Get to the page containing GOT TLS entry for a symbol */
2922 {"gottprel", 0,
2923 0, /* adr_type */
2924 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2925 0,
2926 0,
2927 0,
2928 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2929
2930 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2931 {"gottprel_lo12", 0,
2932 0, /* adr_type */
2933 0,
2934 0,
2935 0,
2936 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2937 0},
2938
2939 /* Get tp offset for a symbol. */
2940 {"tprel", 0,
2941 0, /* adr_type */
2942 0,
2943 0,
2944 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2945 0,
2946 0},
2947
2948 /* Get tp offset for a symbol. */
2949 {"tprel_lo12", 0,
2950 0, /* adr_type */
2951 0,
2952 0,
2953 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2954 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2955 0},
2956
2957 /* Get tp offset for a symbol. */
2958 {"tprel_hi12", 0,
2959 0, /* adr_type */
2960 0,
2961 0,
2962 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2963 0,
2964 0},
2965
2966 /* Get tp offset for a symbol. */
2967 {"tprel_lo12_nc", 0,
2968 0, /* adr_type */
2969 0,
2970 0,
2971 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2972 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2973 0},
2974
2975 /* Most significant bits 32-47 of address/value: MOVZ. */
2976 {"tprel_g2", 0,
2977 0, /* adr_type */
2978 0,
2979 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2980 0,
2981 0,
2982 0},
2983
2984 /* Most significant bits 16-31 of address/value: MOVZ. */
2985 {"tprel_g1", 0,
2986 0, /* adr_type */
2987 0,
2988 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2989 0,
2990 0,
2991 0},
2992
2993 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2994 {"tprel_g1_nc", 0,
2995 0, /* adr_type */
2996 0,
2997 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2998 0,
2999 0,
3000 0},
3001
3002 /* Most significant bits 0-15 of address/value: MOVZ. */
3003 {"tprel_g0", 0,
3004 0, /* adr_type */
3005 0,
3006 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3007 0,
3008 0,
3009 0},
3010
3011 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3012 {"tprel_g0_nc", 0,
3013 0, /* adr_type */
3014 0,
3015 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3016 0,
3017 0,
3018 0},
3019
3020 /* 15bit offset from got entry to base address of GOT table. */
3021 {"gotpage_lo15", 0,
3022 0,
3023 0,
3024 0,
3025 0,
3026 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3027 0},
3028
3029 /* 14bit offset from got entry to base address of GOT table. */
3030 {"gotpage_lo14", 0,
3031 0,
3032 0,
3033 0,
3034 0,
3035 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3036 0},
3037 };
3038
3039 /* Given the address of a pointer pointing to the textual name of a
3040 relocation as may appear in assembler source, attempt to find its
3041 details in reloc_table. The pointer will be updated to the character
3042 after the trailing colon. On failure, NULL will be returned;
3043 otherwise return the reloc_table_entry. */
3044
3045 static struct reloc_table_entry *
3046 find_reloc_table_entry (char **str)
3047 {
3048 unsigned int i;
3049 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3050 {
3051 int length = strlen (reloc_table[i].name);
3052
3053 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3054 && (*str)[length] == ':')
3055 {
3056 *str += (length + 1);
3057 return &reloc_table[i];
3058 }
3059 }
3060
3061 return NULL;
3062 }
3063
/* Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK (leave the decision to generic_force_reloc).  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

      /* GOT-, TLS- and page-relative relocations whose final value is
	 only known at link time.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      return -1;
    }
}
3161
3162 int
3163 aarch64_force_relocation (struct fix *fixp)
3164 {
3165 int res = aarch64_force_reloc (fixp->fx_r_type);
3166
3167 if (res == -1)
3168 return generic_force_reloc (fixp);
3169 return res;
3170 }
3171
/* Mode argument to parse_shift and parser_shifter_operand: selects which
   shift/extend modifiers are acceptable for the operand being parsed.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n" (SVE)  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl" (SVE)  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3186
/* Parse a <shift> operator on an AArch64 data processing instruction.
   On success, fill in OPERAND->shifter (kind, amount, and the
   operator/amount presence flags) and advance *STR past the shift;
   MODE restricts which shift operators are acceptable.
   Return TRUE on success; otherwise return FALSE with an error set.  */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan past the alphabetic shift/extend mnemonic.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the mnemonic up in the shift-operator hash table.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only valid where the caller explicitly allows it.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  /* Likewise MUL is restricted to the SVE "mul #n" / "mul vl" modes.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Reject modifiers that MODE does not admit.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    /* No amount expected: end of address operand, or "MUL VL".  */
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      (void) aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT,
				     NORMAL_RESOLUTION);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* An absent amount is only acceptable for a bare extend operator
	 (e.g. "uxtw"); a bare "#" or a non-extend operator with no
	 amount is an error.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      /* Note that amount_present is only set for an explicit constant
	 amount, not for the implicit MUL VL / bare-extend cases above.  */
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3360
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   Validation of immediate operands is deferred to md_apply_fix.

   On success the parsed expression is left in inst.reloc.exp.
   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the two immediate-capable modes can reach this path.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT, NORMAL_RESOLUTION))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Reject any shifter following a logical immediate value.
     NOTE(review): if parse_shift fails here it has already recorded its
     own error, yet this function still returns TRUE; presumably callers
     notice the recorded error — confirm before restructuring.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3402
3403 /* Parse a <shifter_operand> for a data processing instruction:
3404
3405 <Rm>
3406 <Rm>, <shift>
3407 #<immediate>
3408 #<immediate>, LSL #imm
3409
3410 where <shift> is handled by parse_shift above, and the last two
3411 cases are handled by the function above.
3412
3413 Validation of immediate operands is deferred to md_apply_fix.
3414
3415 Return TRUE on success; otherwise return FALSE. */
3416
3417 static bool
3418 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3419 enum parse_shift_mode mode)
3420 {
3421 const reg_entry *reg;
3422 aarch64_opnd_qualifier_t qualifier;
3423 enum aarch64_operand_class opd_class
3424 = aarch64_get_operand_class (operand->type);
3425
3426 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3427 if (reg)
3428 {
3429 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3430 {
3431 set_syntax_error (_("unexpected register in the immediate operand"));
3432 return false;
3433 }
3434
3435 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3436 {
3437 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3438 return false;
3439 }
3440
3441 operand->reg.regno = reg->number;
3442 operand->qualifier = qualifier;
3443
3444 /* Accept optional shift operation on register. */
3445 if (! skip_past_comma (str))
3446 return true;
3447
3448 if (! parse_shift (str, operand, mode))
3449 return false;
3450
3451 return true;
3452 }
3453 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3454 {
3455 set_syntax_error
3456 (_("integer register expected in the extended/shifted operand "
3457 "register"));
3458 return false;
3459 }
3460
3461 /* We have a shifted immediate variable. */
3462 return parse_shifter_operand_imm (str, operand, mode);
3463 }
3464
/* Parse the relocation-modifier form of a <shifter_operand>:

      {#}:<reloc_op>:<symbol> {, LSL #imm}

   Anything that does not start with "#:" or ":" is punted to
   parse_shifter_operand.  On success the ADD-variant relocation type
   from the table entry is recorded in inst.reloc.

   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip the "#:" or ":" introducer.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT,
				    aarch64_force_reloc (entry->add_type) == 1))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3527
/* Parse all forms of an address expression.  Information is written
   to *OPERAND and/or inst.reloc.

   The A64 instruction set has the following addressing modes:

   Offset
     [base]			// in SIMD ld/st structure
     [base{,#0}]		// in ld/st exclusive
     [base{,#imm}]
     [base,Xm{,LSL #imm}]
     [base,Xm,SXTX {#imm}]
     [base,Wm,(S|U)XTW {#imm}]
   Pre-indexed
     [base]!			// in ldraa/ldrab exclusive
     [base,#imm]!
   Post-indexed
     [base],#imm
     [base],Xm			// in SIMD ld/st structure
   PC-relative (literal)
     label
   SVE:
     [base,#imm,MUL VL]
     [base,Zm.D{,LSL #imm}]
     [base,Zm.S,(S|U)XTW {#imm}]
     [base,Zm.D,(S|U)XTW {#imm}]	// ignores top 32 bits of Zm.D elements
     [Zn.S,#imm]
     [Zn.D,#imm]
     [Zn.S{, Xm}]
     [Zn.S,Zm.S{,LSL #imm}]	// in ADR
     [Zn.D,Zm.D{,LSL #imm}]	// in ADR
     [Zn.D,Zm.D,(S|U)XTW {#imm}]	// in ADR

   (As a convenience, the notation "=immediate" is permitted in conjunction
   with the pc-relative literal load instructions to automatically place an
   immediate value or symbolic address in a nearby literal pool and generate
   a hidden label which references it.)

   Upon a successful parsing, the address structure in *OPERAND will be
   filled in the following way:

     .base_regno = <base>
     .offset.is_reg	// 1 if the offset is a register
     .offset.imm = <imm>
     .offset.regno = <Rm>

   For different addressing modes defined in the A64 ISA:

   Offset
     .pcrel=0; .preind=1; .postind=0; .writeback=0
   Pre-indexed
     .pcrel=0; .preind=1; .postind=0; .writeback=1
   Post-indexed
     .pcrel=0; .preind=0; .postind=1; .writeback=1
   PC-relative (literal)
     .pcrel=1; .preind=1; .postind=0; .writeback=0

   The shift/extension information, if any, will be stored in .shifter.
   The base and offset qualifiers will be stored in *BASE_QUALIFIER and
   *OFFSET_QUALIFIER respectively, with NIL being used if there's no
   corresponding register.

   BASE_TYPE says which types of base register should be accepted and
   OFFSET_TYPE says the same for offset registers.  IMM_SHIFT_MODE
   is the type of shifter that is allowed for immediate offsets,
   or SHIFTED_NONE if none.

   In all other respects, it is the caller's responsibility to check
   for addressing modes not supported by the instruction, and to set
   inst.reloc.type.  */

static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[' at all: =immediate or label (pc-relative).  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* ADR uses the adr variant of the reloc; everything else here
	     is a load-literal.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					aarch64_force_reloc (entry->add_type) == 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				       NORMAL_RESOLUTION))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* A 64-bit-style offset (no extend) requires a 64-bit Rm.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* Base and offset sizes must agree, except for the SVE2
		 vector-plus-scalar form [Zn.S, Xm].  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW demands a 32-bit Rm.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					    aarch64_force_reloc (entry->add_type) == 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					    NORMAL_RESOLUTION))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					 NORMAL_RESOLUTION))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
3901
3902 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3903 on success. */
3904 static bool
3905 parse_address (char **str, aarch64_opnd_info *operand)
3906 {
3907 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3908 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3909 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3910 }
3911
3912 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3913 The arguments have the same meaning as for parse_address_main.
3914 Return TRUE on success. */
3915 static bool
3916 parse_sve_address (char **str, aarch64_opnd_info *operand,
3917 aarch64_opnd_qualifier_t *base_qualifier,
3918 aarch64_opnd_qualifier_t *offset_qualifier)
3919 {
3920 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3921 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3922 SHIFTED_MUL_VL);
3923 }
3924
3925 /* Parse a register X0-X30. The register must be 64-bit and register 31
3926 is unallocated. */
3927 static bool
3928 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
3929 {
3930 const reg_entry *reg = parse_reg (str);
3931 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
3932 {
3933 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3934 return false;
3935 }
3936 operand->reg.regno = reg->number;
3937 operand->qualifier = AARCH64_OPND_QLF_X;
3938 return true;
3939 }
3940
3941 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3942 Return TRUE on success; otherwise return FALSE. */
3943 static bool
3944 parse_half (char **str, int *internal_fixup_p)
3945 {
3946 char *p = *str;
3947
3948 skip_past_char (&p, '#');
3949
3950 gas_assert (internal_fixup_p);
3951 *internal_fixup_p = 0;
3952
3953 if (*p == ':')
3954 {
3955 struct reloc_table_entry *entry;
3956
3957 /* Try to parse a relocation. Anything else is an error. */
3958 ++p;
3959
3960 if (!(entry = find_reloc_table_entry (&p)))
3961 {
3962 set_syntax_error (_("unknown relocation modifier"));
3963 return false;
3964 }
3965
3966 if (entry->movw_type == 0)
3967 {
3968 set_syntax_error
3969 (_("this relocation modifier is not allowed on this instruction"));
3970 return false;
3971 }
3972
3973 inst.reloc.type = entry->movw_type;
3974 }
3975 else
3976 *internal_fixup_p = 1;
3977
3978 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
3979 aarch64_force_reloc (inst.reloc.type) == 1))
3980 return false;
3981
3982 *str = p;
3983 return true;
3984 }
3985
3986 /* Parse an operand for an ADRP instruction:
3987 ADRP <Xd>, <label>
3988 Return TRUE on success; otherwise return FALSE. */
3989
3990 static bool
3991 parse_adrp (char **str)
3992 {
3993 char *p;
3994
3995 p = *str;
3996 if (*p == ':')
3997 {
3998 struct reloc_table_entry *entry;
3999
4000 /* Try to parse a relocation. Anything else is an error. */
4001 ++p;
4002 if (!(entry = find_reloc_table_entry (&p)))
4003 {
4004 set_syntax_error (_("unknown relocation modifier"));
4005 return false;
4006 }
4007
4008 if (entry->adrp_type == 0)
4009 {
4010 set_syntax_error
4011 (_("this relocation modifier is not allowed on this instruction"));
4012 return false;
4013 }
4014
4015 inst.reloc.type = entry->adrp_type;
4016 }
4017 else
4018 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4019
4020 inst.reloc.pc_rel = 1;
4021 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
4022 aarch64_force_reloc (inst.reloc.type) == 1))
4023 return false;
4024 *str = p;
4025 return true;
4026 }
4027
4028 /* Miscellaneous. */
4029
4030 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4031 of SIZE tokens in which index I gives the token for field value I,
4032 or is null if field value I is invalid. REG_TYPE says which register
4033 names should be treated as registers rather than as symbolic immediates.
4034
4035 Return true on success, moving *STR past the operand and storing the
4036 field value in *VAL. */
4037
4038 static int
4039 parse_enum_string (char **str, int64_t *val, const char *const *array,
4040 size_t size, aarch64_reg_type reg_type)
4041 {
4042 expressionS exp;
4043 char *p, *q;
4044 size_t i;
4045
4046 /* Match C-like tokens. */
4047 p = q = *str;
4048 while (ISALNUM (*q))
4049 q++;
4050
4051 for (i = 0; i < size; ++i)
4052 if (array[i]
4053 && strncasecmp (array[i], p, q - p) == 0
4054 && array[i][q - p] == 0)
4055 {
4056 *val = i;
4057 *str = q;
4058 return true;
4059 }
4060
4061 if (!parse_immediate_expression (&p, &exp, reg_type))
4062 return false;
4063
4064 if (exp.X_op == O_constant
4065 && (uint64_t) exp.X_add_number < size)
4066 {
4067 *val = exp.X_add_number;
4068 *str = p;
4069 return true;
4070 }
4071
4072 /* Use the default error for this operand. */
4073 return false;
4074 }
4075
4076 /* Parse an option for a preload instruction. Returns the encoding for the
4077 option, or PARSE_FAIL. */
4078
4079 static int
4080 parse_pldop (char **str)
4081 {
4082 char *p, *q;
4083 const struct aarch64_name_value_pair *o;
4084
4085 p = q = *str;
4086 while (ISALNUM (*q))
4087 q++;
4088
4089 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4090 if (!o)
4091 return PARSE_FAIL;
4092
4093 *str = q;
4094 return o->value;
4095 }
4096
4097 /* Parse an option for a barrier instruction. Returns the encoding for the
4098 option, or PARSE_FAIL. */
4099
4100 static int
4101 parse_barrier (char **str)
4102 {
4103 char *p, *q;
4104 const struct aarch64_name_value_pair *o;
4105
4106 p = q = *str;
4107 while (ISALPHA (*q))
4108 q++;
4109
4110 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4111 if (!o)
4112 return PARSE_FAIL;
4113
4114 *str = q;
4115 return o->value;
4116 }
4117
4118 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4119 return 0 if successful. Otherwise return PARSE_FAIL. */
4120
4121 static int
4122 parse_barrier_psb (char **str,
4123 const struct aarch64_name_value_pair ** hint_opt)
4124 {
4125 char *p, *q;
4126 const struct aarch64_name_value_pair *o;
4127
4128 p = q = *str;
4129 while (ISALPHA (*q))
4130 q++;
4131
4132 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4133 if (!o)
4134 {
4135 set_fatal_syntax_error
4136 ( _("unknown or missing option to PSB/TSB"));
4137 return PARSE_FAIL;
4138 }
4139
4140 if (o->value != 0x11)
4141 {
4142 /* PSB only accepts option name 'CSYNC'. */
4143 set_syntax_error
4144 (_("the specified option is not accepted for PSB/TSB"));
4145 return PARSE_FAIL;
4146 }
4147
4148 *str = q;
4149 *hint_opt = o;
4150 return 0;
4151 }
4152
4153 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4154 return 0 if successful. Otherwise return PARSE_FAIL. */
4155
4156 static int
4157 parse_bti_operand (char **str,
4158 const struct aarch64_name_value_pair ** hint_opt)
4159 {
4160 char *p, *q;
4161 const struct aarch64_name_value_pair *o;
4162
4163 p = q = *str;
4164 while (ISALPHA (*q))
4165 q++;
4166
4167 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4168 if (!o)
4169 {
4170 set_fatal_syntax_error
4171 ( _("unknown option to BTI"));
4172 return PARSE_FAIL;
4173 }
4174
4175 switch (o->value)
4176 {
4177 /* Valid BTI operands. */
4178 case HINT_OPD_C:
4179 case HINT_OPD_J:
4180 case HINT_OPD_JC:
4181 break;
4182
4183 default:
4184 set_syntax_error
4185 (_("unknown option to BTI"));
4186 return PARSE_FAIL;
4187 }
4188
4189 *str = q;
4190 *hint_opt = o;
4191 return 0;
4192 }
4193
4194 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4195 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4196 on failure. Format:
4197
4198 REG_TYPE.QUALIFIER
4199
4200 Side effect: Update STR with current parse position of success.
4201 */
4202
4203 static const reg_entry *
4204 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4205 aarch64_opnd_qualifier_t *qualifier)
4206 {
4207 char *q;
4208
4209 reg_entry *reg = parse_reg (str);
4210 if (reg != NULL && reg->type == reg_type)
4211 {
4212 if (!skip_past_char (str, '.'))
4213 {
4214 set_syntax_error (_("missing ZA tile element size separator"));
4215 return NULL;
4216 }
4217
4218 q = *str;
4219 switch (TOLOWER (*q))
4220 {
4221 case 'b':
4222 *qualifier = AARCH64_OPND_QLF_S_B;
4223 break;
4224 case 'h':
4225 *qualifier = AARCH64_OPND_QLF_S_H;
4226 break;
4227 case 's':
4228 *qualifier = AARCH64_OPND_QLF_S_S;
4229 break;
4230 case 'd':
4231 *qualifier = AARCH64_OPND_QLF_S_D;
4232 break;
4233 case 'q':
4234 *qualifier = AARCH64_OPND_QLF_S_Q;
4235 break;
4236 default:
4237 return NULL;
4238 }
4239 q++;
4240
4241 *str = q;
4242 return reg;
4243 }
4244
4245 return NULL;
4246 }
4247
4248 /* Parse SME ZA tile encoded in <ZAda> assembler symbol.
4249 Function return tile QUALIFIER on success.
4250
4251 Tiles are in example format: za[0-9]\.[bhsd]
4252
4253 Function returns <ZAda> register number or PARSE_FAIL.
4254 */
4255 static int
4256 parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
4257 {
4258 int regno;
4259 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZA, qualifier);
4260
4261 if (reg == NULL)
4262 return PARSE_FAIL;
4263 regno = reg->number;
4264
4265 switch (*qualifier)
4266 {
4267 case AARCH64_OPND_QLF_S_B:
4268 if (regno != 0x00)
4269 {
4270 set_syntax_error (_("invalid ZA tile register number, expected za0"));
4271 return PARSE_FAIL;
4272 }
4273 break;
4274 case AARCH64_OPND_QLF_S_H:
4275 if (regno > 0x01)
4276 {
4277 set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
4278 return PARSE_FAIL;
4279 }
4280 break;
4281 case AARCH64_OPND_QLF_S_S:
4282 if (regno > 0x03)
4283 {
4284 /* For the 32-bit variant: is the name of the ZA tile ZA0-ZA3. */
4285 set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
4286 return PARSE_FAIL;
4287 }
4288 break;
4289 case AARCH64_OPND_QLF_S_D:
4290 if (regno > 0x07)
4291 {
4292 /* For the 64-bit variant: is the name of the ZA tile ZA0-ZA7 */
4293 set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
4294 return PARSE_FAIL;
4295 }
4296 break;
4297 default:
4298 set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
4299 return PARSE_FAIL;
4300 }
4301
4302 return regno;
4303 }
4304
4305 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4306
4307 #<imm>
4308 <imm>
4309
4310 Function return TRUE if immediate was found, or FALSE.
4311 */
4312 static bool
4313 parse_sme_immediate (char **str, int64_t *imm)
4314 {
4315 int64_t val;
4316 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4317 return false;
4318
4319 *imm = val;
4320 return true;
4321 }
4322
4323 /* Parse index with vector select register and immediate:
4324
4325 [<Wv>, <imm>]
4326 [<Wv>, #<imm>]
4327 where <Wv> is in W12-W15 range and # is optional for immediate.
4328
4329 Function performs extra check for mandatory immediate value if REQUIRE_IMM
4330 is set to true.
4331
4332 On success function returns TRUE and populated VECTOR_SELECT_REGISTER and
4333 IMM output.
4334 */
4335 static bool
4336 parse_sme_za_hv_tiles_operand_index (char **str,
4337 int *vector_select_register,
4338 int64_t *imm)
4339 {
4340 const reg_entry *reg;
4341
4342 if (!skip_past_char (str, '['))
4343 {
4344 set_syntax_error (_("expected '['"));
4345 return false;
4346 }
4347
4348 /* Vector select register W12-W15 encoded in the 2-bit Rv field. */
4349 reg = parse_reg (str);
4350 if (reg == NULL || reg->type != REG_TYPE_R_32
4351 || reg->number < 12 || reg->number > 15)
4352 {
4353 set_syntax_error (_("expected vector select register W12-W15"));
4354 return false;
4355 }
4356 *vector_select_register = reg->number;
4357
4358 if (!skip_past_char (str, ',')) /* Optional index offset immediate. */
4359 {
4360 set_syntax_error (_("expected ','"));
4361 return false;
4362 }
4363
4364 if (!parse_sme_immediate (str, imm))
4365 {
4366 set_syntax_error (_("index offset immediate expected"));
4367 return false;
4368 }
4369
4370 if (!skip_past_char (str, ']'))
4371 {
4372 set_syntax_error (_("expected ']'"));
4373 return false;
4374 }
4375
4376 return true;
4377 }
4378
/* Parse SME ZA horizontal or vertical vector access to tiles.
   Function extracts from STR to SLICE_INDICATOR <HV> horizontal (0) or
   vertical (1) ZA tile vector orientation.  VECTOR_SELECT_REGISTER
   contains <Wv> select register and corresponding optional IMMEDIATE.
   In addition QUALIFIER is extracted.

   Field format examples:

     ZA0<HV>.B[<Wv>, #<imm>]
     <ZAn><HV>.H[<Wv>, #<imm>]
     <ZAn><HV>.S[<Wv>, #<imm>]
     <ZAn><HV>.D[<Wv>, #<imm>]
     <ZAn><HV>.Q[<Wv>, #<imm>]

   Function returns <ZAda> register number or PARSE_FAIL.
*/
static int
parse_sme_za_hv_tiles_operand (char **str,
			       enum sme_hv_slice *slice_indicator,
			       int *vector_select_register,
			       int *imm,
			       aarch64_opnd_qualifier_t *qualifier)
{
  char *qh, *qv;
  int regno;
  int regno_limit;
  int64_t imm_limit;
  int64_t imm_value;
  const reg_entry *reg;

  /* Try the horizontal (ZAnH) form first, then the vertical (ZAnV) one;
     each attempt parses on its own copy of the cursor so a failed try
     does not move *str.  */
  qh = qv = *str;
  if ((reg = parse_reg_with_qual (&qh, REG_TYPE_ZAH, qualifier)) != NULL)
    {
      *slice_indicator = HV_horizontal;
      *str = qh;
    }
  else if ((reg = parse_reg_with_qual (&qv, REG_TYPE_ZAV, qualifier)) != NULL)
    {
      *slice_indicator = HV_vertical;
      *str = qv;
    }
  else
    return PARSE_FAIL;
  regno = reg->number;

  /* The element size fixes both the number of addressable tiles and the
     number of slices per select-register step, hence both limits.  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      regno_limit = 0;
      imm_limit = 15;
      break;
    case AARCH64_OPND_QLF_S_H:
      regno_limit = 1;
      imm_limit = 7;
      break;
    case AARCH64_OPND_QLF_S_S:
      regno_limit = 3;
      imm_limit = 3;
      break;
    case AARCH64_OPND_QLF_S_D:
      regno_limit = 7;
      imm_limit = 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      regno_limit = 15;
      imm_limit = 0;
      break;
    default:
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
      return PARSE_FAIL;
    }

  /* Check if destination register ZA tile vector is in range for given
     instruction variant.  */
  if (regno < 0 || regno > regno_limit)
    {
      set_syntax_error (_("ZA tile vector out of range"));
      return PARSE_FAIL;
    }

  if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
					    &imm_value))
    return PARSE_FAIL;

  /* Check if optional index offset is in the range for instruction
     variant.  */
  if (imm_value < 0 || imm_value > imm_limit)
    {
      set_syntax_error (_("index offset out of range"));
      return PARSE_FAIL;
    }

  *imm = imm_value;

  return regno;
}
4475
4476
4477 static int
4478 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4479 enum sme_hv_slice *slice_indicator,
4480 int *vector_select_register,
4481 int *imm,
4482 aarch64_opnd_qualifier_t *qualifier)
4483 {
4484 int regno;
4485
4486 if (!skip_past_char (str, '{'))
4487 {
4488 set_syntax_error (_("expected '{'"));
4489 return PARSE_FAIL;
4490 }
4491
4492 regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4493 vector_select_register, imm,
4494 qualifier);
4495
4496 if (regno == PARSE_FAIL)
4497 return PARSE_FAIL;
4498
4499 if (!skip_past_char (str, '}'))
4500 {
4501 set_syntax_error (_("expected '}'"));
4502 return PARSE_FAIL;
4503 }
4504
4505 return regno;
4506 }
4507
/* Parse list of up to eight 64-bit element tile names separated by commas in
   SME's ZERO instruction:

     ZERO { <mask> }

   Function returns <mask>:

     an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
*/
static int
parse_sme_zero_mask(char **str)
{
  char *q;
  int mask;
  aarch64_opnd_qualifier_t qualifier;

  mask = 0x00;
  q = *str;
  do
    {
      const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA, &qualifier);
      if (reg)
	{
	  int regno = reg->number;
	  if (qualifier == AARCH64_OPND_QLF_S_B && regno == 0)
	    {
	      /* { ZA0.B } is assembled as all-ones immediate.  */
	      mask = 0xff;
	    }
	  else if (qualifier == AARCH64_OPND_QLF_S_H && regno < 2)
	    /* Each .H tile maps to alternating mask bits:
	       ZA0.H -> 0x55, ZA1.H -> 0xaa.  */
	    mask |= 0x55 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_S && regno < 4)
	    /* Each .S tile maps to every fourth mask bit, starting at
	       its register number.  */
	    mask |= 0x11 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_D && regno < 8)
	    /* Each .D tile maps to exactly one mask bit.  */
	    mask |= 0x01 << regno;
	  else
	    {
	      set_syntax_error (_("wrong ZA tile element format"));
	      return PARSE_FAIL;
	    }
	  continue;
	}
      else if (strncasecmp (q, "za", 2) == 0
	       && !ISALNUM (q[2]))
	{
	  /* { ZA } is assembled as all-ones immediate.  */
	  mask = 0xff;
	  q += 2;
	  continue;
	}
      else
	{
	  set_syntax_error (_("wrong ZA tile element format"));
	  return PARSE_FAIL;
	}
    }
  while (skip_past_char (&q, ','));

  *str = q;
  return mask;
}
4569
4570 /* Wraps in curly braces <mask> operand ZERO instruction:
4571
4572 ZERO { <mask> }
4573
4574 Function returns value of <mask> bit-field.
4575 */
4576 static int
4577 parse_sme_list_of_64bit_tiles (char **str)
4578 {
4579 int regno;
4580
4581 if (!skip_past_char (str, '{'))
4582 {
4583 set_syntax_error (_("expected '{'"));
4584 return PARSE_FAIL;
4585 }
4586
4587 /* Empty <mask> list is an all-zeros immediate. */
4588 if (!skip_past_char (str, '}'))
4589 {
4590 regno = parse_sme_zero_mask (str);
4591 if (regno == PARSE_FAIL)
4592 return PARSE_FAIL;
4593
4594 if (!skip_past_char (str, '}'))
4595 {
4596 set_syntax_error (_("expected '}'"));
4597 return PARSE_FAIL;
4598 }
4599 }
4600 else
4601 regno = 0x00;
4602
4603 return regno;
4604 }
4605
4606 /* Parse ZA array operand used in e.g. STR and LDR instruction.
4607 Operand format:
4608
4609 ZA[<Wv>, <imm>]
4610 ZA[<Wv>, #<imm>]
4611
4612 Function returns <Wv> or PARSE_FAIL.
4613 */
4614 static int
4615 parse_sme_za_array (char **str, int *imm)
4616 {
4617 char *p, *q;
4618 int regno;
4619 int64_t imm_value;
4620
4621 p = q = *str;
4622 while (ISALPHA (*q))
4623 q++;
4624
4625 if ((q - p != 2) || strncasecmp ("za", p, q - p) != 0)
4626 {
4627 set_syntax_error (_("expected ZA array"));
4628 return PARSE_FAIL;
4629 }
4630
4631 if (! parse_sme_za_hv_tiles_operand_index (&q, &regno, &imm_value))
4632 return PARSE_FAIL;
4633
4634 if (imm_value < 0 || imm_value > 15)
4635 {
4636 set_syntax_error (_("offset out of range"));
4637 return PARSE_FAIL;
4638 }
4639
4640 *imm = imm_value;
4641 *str = q;
4642 return regno;
4643 }
4644
4645 /* Parse streaming mode operand for SMSTART and SMSTOP.
4646
4647 {SM | ZA}
4648
4649 Function returns 's' if SM or 'z' if ZM is parsed. Otherwise PARSE_FAIL.
4650 */
4651 static int
4652 parse_sme_sm_za (char **str)
4653 {
4654 char *p, *q;
4655
4656 p = q = *str;
4657 while (ISALPHA (*q))
4658 q++;
4659
4660 if ((q - p != 2)
4661 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4662 {
4663 set_syntax_error (_("expected SM or ZA operand"));
4664 return PARSE_FAIL;
4665 }
4666
4667 *str = q;
4668 return TOLOWER (p[0]);
4669 }
4670
4671 /* Parse the name of the source scalable predicate register, the index base
4672 register W12-W15 and the element index. Function performs element index
4673 limit checks as well as qualifier type checks.
4674
4675 <Pn>.<T>[<Wv>, <imm>]
4676 <Pn>.<T>[<Wv>, #<imm>]
4677
4678 On success function sets <Wv> to INDEX_BASE_REG, <T> to QUALIFIER and
4679 <imm> to IMM.
4680 Function returns <Pn>, or PARSE_FAIL.
4681 */
4682 static int
4683 parse_sme_pred_reg_with_index(char **str,
4684 int *index_base_reg,
4685 int *imm,
4686 aarch64_opnd_qualifier_t *qualifier)
4687 {
4688 int regno;
4689 int64_t imm_limit;
4690 int64_t imm_value;
4691 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_PN, qualifier);
4692
4693 if (reg == NULL)
4694 return PARSE_FAIL;
4695 regno = reg->number;
4696
4697 switch (*qualifier)
4698 {
4699 case AARCH64_OPND_QLF_S_B:
4700 imm_limit = 15;
4701 break;
4702 case AARCH64_OPND_QLF_S_H:
4703 imm_limit = 7;
4704 break;
4705 case AARCH64_OPND_QLF_S_S:
4706 imm_limit = 3;
4707 break;
4708 case AARCH64_OPND_QLF_S_D:
4709 imm_limit = 1;
4710 break;
4711 default:
4712 set_syntax_error (_("wrong predicate register element size, allowed b, h, s and d"));
4713 return PARSE_FAIL;
4714 }
4715
4716 if (! parse_sme_za_hv_tiles_operand_index (str, index_base_reg, &imm_value))
4717 return PARSE_FAIL;
4718
4719 if (imm_value < 0 || imm_value > imm_limit)
4720 {
4721 set_syntax_error (_("element index out of range for given variant"));
4722 return PARSE_FAIL;
4723 }
4724
4725 *imm = imm_value;
4726
4727 return regno;
4728 }
4729
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   On success *STR is advanced past the name and, when FLAGS is non-NULL,
   *FLAGS receives the register's flag bits (0 for the implementation
   defined S..._... form).  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, stopping at the
     first character that cannot be part of a register name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Field range checks per the encoding widths below.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the canonical sysreg encoding.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: diagnose uses not supported by the selected CPU and
	 deprecated names, but still return the encoding.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4803
4804 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4805 for the option, or NULL. */
4806
4807 static const aarch64_sys_ins_reg *
4808 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4809 {
4810 char *p, *q;
4811 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4812 const aarch64_sys_ins_reg *o;
4813
4814 p = buf;
4815 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4816 if (p < buf + (sizeof (buf) - 1))
4817 *p++ = TOLOWER (*q);
4818 *p = '\0';
4819
4820 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4821 valid system register. This is enforced by construction of the hash
4822 table. */
4823 if (p - buf != q - *str)
4824 return NULL;
4825
4826 o = str_hash_find (sys_ins_regs, buf);
4827 if (!o)
4828 return NULL;
4829
4830 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4831 o->name, o->value, o->flags, 0))
4832 as_bad (_("selected processor does not support system register "
4833 "name '%s'"), buf);
4834 if (aarch64_sys_reg_deprecated_p (o->flags))
4835 as_warn (_("system register name '%s' is deprecated and may be "
4836 "removed in a future release"), buf);
4837
4838 *str = q;
4839 return o;
4840 }
4841 \f
/* Operand-parsing helper macros used inside parse_operands.  Each one
   parses a single syntactic element from the cursor STR (in scope at the
   expansion site) and jumps to the enclosing "failure" label when the
   element is absent or malformed.  */

/* Consume the single character CHR, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into VAL/RTYPE, or fail with the
   default error.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE and record its
   number and qualifier into INFO, or fail with the default error.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or fail with a fatal syntax error naming the range.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse one of the strings in ARRAY, storing its index into VAL,
   or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; fail when it yields false/zero.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4893 \f
/* Encode the 12-bit imm field of Add/sub immediate; the field is placed
   at bit 10 of the instruction word.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  const unsigned int field_lsb = 10;
  return imm << field_lsb;
}
4900
/* Encode the shift amount field of Add/sub immediate; the field is
   placed at bit 22 of the instruction word.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  const unsigned int field_lsb = 22;
  return cnt << field_lsb;
}
4907
4908
/* Encode the imm field of Adr instruction: the low two bits form immlo
   (bits [30:29]) and bits [20:2] form immhi (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;		    /* [1:0]  -> [30:29]  */
  uint32_t immhi = imm & (0x7ffff << 2);    /* [20:2] -> [23:5]   */
  return (immlo << 29) | (immhi << 3);
}
4916
/* Encode the immediate field of Move wide immediate; the field is
   placed at bit 5 of the instruction word.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  const unsigned int field_lsb = 5;
  return imm << field_lsb;
}
4923
/* Encode the 26-bit offset of unconditional branch; the offset occupies
   the low 26 bits of the instruction word.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t mask_26 = (1u << 26) - 1;
  return ofs & mask_26;
}
4930
/* Encode the 19-bit offset of conditional branch and compare & branch;
   the masked offset is placed at bit 5.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t mask_19 = (1u << 19) - 1;
  return (ofs & mask_19) << 5;
}
4937
/* Encode the 19-bit offset of ld literal; the masked offset is placed
   at bit 5.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t mask_19 = (1u << 19) - 1;
  return (ofs & mask_19) << 5;
}
4944
/* Encode the 14-bit offset of test & branch; the masked offset is
   placed at bit 5.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t mask_14 = (1u << 14) - 1;
  return (ofs & mask_14) << 5;
}
4951
/* Encode the 16-bit imm field of svc/hvc/smc; the field is placed at
   bit 5 of the instruction word.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  const unsigned int field_lsb = 5;
  return imm << field_lsb;
}
4958
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling bit 30
   of the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
4965
/* Rewrite a MOVZ/MOVN-family opcode to the MOVZ form by setting
   bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode | movz_bit;
}
4971
/* Rewrite a MOVZ/MOVN-family opcode to the MOVN form by clearing
   bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode & ~movz_bit;
}
4977
4978 /* Overall per-instruction processing. */
4979
4980 /* We need to be able to fix up arbitrary expressions in some statements.
4981 This is so that we can handle symbols that are an arbitrary distance from
4982 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4983 which returns part of an address in a form which will be valid for
4984 a data instruction. We do this by pushing the expression into a symbol
4985 in the expr_section, and creating a fix for that. */
4986
4987 static fixS *
4988 fix_new_aarch64 (fragS * frag,
4989 int where,
4990 short int size,
4991 expressionS * exp,
4992 int pc_rel,
4993 int reloc)
4994 {
4995 fixS *new_fix;
4996
4997 switch (exp->X_op)
4998 {
4999 case O_constant:
5000 case O_symbol:
5001 case O_add:
5002 case O_subtract:
5003 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5004 break;
5005
5006 default:
5007 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5008 pc_rel, reloc);
5009 break;
5010 }
5011 return new_fix;
5012 }
5013 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.
   Non-zero enables the "did you mean ...?" style hints below.  */
static int verbose_error_p = 1;
5019
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.

   Indexed by enum aarch64_operand_error_kind, so the entries must match
   that enum's order exactly.  The UNTIED entries were previously missing,
   which made debug traces print the wrong name for every kind from
   AARCH64_OPDE_OUT_OF_RANGE onwards and read past the end of the array
   for the last kinds.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
5037
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enum values being ordered by
     increasing severity; these asserts document and check the expected
     ordering.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
5061
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.

   N.B. returns a pointer to a static buffer, so the result is only valid
   until the next call and the function is not reentrant.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5090
5091 static void
5092 reset_aarch64_instruction (aarch64_instruction *instruction)
5093 {
5094 memset (instruction, '\0', sizeof (aarch64_instruction));
5095 instruction->reloc.type = BFD_RELOC_UNUSED;
5096 }
5097
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded error: the instruction template it was detected against
   plus the error detail; records form a singly linked list.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Template the error applies to.  */
  aarch64_operand_error detail;		/* What went wrong, and where.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* A list of operand_error_record, tracked by both ends so records can be
   spliced onto the free list in O(1).  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
5117
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines to avoid repeated
   allocation (see init_operand_error_report).  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5129
5130 /* Initialize the data structure that stores the operand mismatch
5131 information on assembling one line of the assembly code. */
5132 static void
5133 init_operand_error_report (void)
5134 {
5135 if (operand_error_report.head != NULL)
5136 {
5137 gas_assert (operand_error_report.tail != NULL);
5138 operand_error_report.tail->next = free_opnd_error_record_nodes;
5139 free_opnd_error_record_nodes = operand_error_report.head;
5140 operand_error_report.head = NULL;
5141 operand_error_report.tail = NULL;
5142 return;
5143 }
5144 gas_assert (operand_error_report.tail == NULL);
5145 }
5146
5147 /* Return TRUE if some operand error has been recorded during the
5148 parsing of the current assembly line using the opcode *OPCODE;
5149 otherwise return FALSE. */
5150 static inline bool
5151 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5152 {
5153 operand_error_record *record = operand_error_report.head;
5154 return record && record->opcode == opcode;
5155 }
5156
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Reuse a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Store (or overwrite) the detail in the head record.  */
  record->detail = new_record->detail;
}
5208
5209 static inline void
5210 record_operand_error_info (const aarch64_opcode *opcode,
5211 aarch64_operand_error *error_info)
5212 {
5213 operand_error_record record;
5214 record.opcode = opcode;
5215 record.detail = *error_info;
5216 add_operand_error_record (&record);
5217 }
5218
5219 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5220 error message *ERROR, for operand IDX (count from 0). */
5221
5222 static void
5223 record_operand_error (const aarch64_opcode *opcode, int idx,
5224 enum aarch64_operand_error_kind kind,
5225 const char* error)
5226 {
5227 aarch64_operand_error info;
5228 memset(&info, 0, sizeof (info));
5229 info.index = idx;
5230 info.kind = kind;
5231 info.error = error;
5232 info.non_fatal = false;
5233 record_operand_error_info (opcode, &info);
5234 }
5235
5236 static void
5237 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5238 enum aarch64_operand_error_kind kind,
5239 const char* error, const int *extra_data)
5240 {
5241 aarch64_operand_error info;
5242 info.index = idx;
5243 info.kind = kind;
5244 info.error = error;
5245 info.data[0].i = extra_data[0];
5246 info.data[1].i = extra_data[1];
5247 info.data[2].i = extra_data[2];
5248 info.non_fatal = false;
5249 record_operand_error_info (opcode, &info);
5250 }
5251
5252 static void
5253 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5254 const char* error, int lower_bound,
5255 int upper_bound)
5256 {
5257 int data[3] = {lower_bound, upper_bound, 0};
5258 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5259 error, data);
5260 }
5261
5262 /* Remove the operand error record for *OPCODE. */
5263 static void ATTRIBUTE_UNUSED
5264 remove_operand_error_record (const aarch64_opcode *opcode)
5265 {
5266 if (opcode_has_operand_error_p (opcode))
5267 {
5268 operand_error_record* record = operand_error_report.head;
5269 gas_assert (record != NULL && operand_error_report.tail != NULL);
5270 operand_error_report.head = record->next;
5271 record->next = free_opnd_error_record_nodes;
5272 free_opnd_error_record_nodes = record;
5273 if (operand_error_report.head == NULL)
5274 {
5275 gas_assert (operand_error_report.tail == record);
5276 operand_error_report.tail = NULL;
5277 }
5278 }
5279 }
5280
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   "Best" means the sequence agreeing with the largest number of the
   instruction's operand qualifiers.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.
	 An all-NIL entry marks the end of the valid sequences.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers agree with this sequence.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5330
5331 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5332 corresponding operands in *INSTR. */
5333
5334 static inline void
5335 assign_qualifier_sequence (aarch64_inst *instr,
5336 const aarch64_opnd_qualifier_t *qualifiers)
5337 {
5338 int i = 0;
5339 int num_opnds = aarch64_num_of_operands (instr->opcode);
5340 gas_assert (num_opnds);
5341 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5342 instr->operands[i].qualifier = *qualifiers;
5343 }
5344
/* Print operands for the diagnosis purpose.

   Appends the textual form of OPNDS (per OPCODE) to the NUL-terminated
   string in BUF; the caller must ensure BUF has enough room.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cpu_variant);

      /* Delimiter: a leading space before the first operand, ", "
	 between subsequent ones.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);
    }
}
5378
/* Send to stderr a string as information, prefixed with the current
   file/line location (when known) and "Info: ", terminated with a
   newline.  Used for the verbose "did you mean" diagnostics.  */

static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file = as_where (&line);
  va_list args;

  if (file != NULL)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));

  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);

  (void) putc ('\n', stderr);
}
5402
/* Output one operand error record.

   STR is the offending assembly line; it is echoed in every diagnostic.
   Non-fatal problems are reported as warnings, everything else as
   errors.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX may be -1 (operand unknown); guard the operand lookup.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail again here; we only need the IR.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the legal lower/upper bounds.  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0].i == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5598
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5696 \f
5697 /* Write an AARCH64 instruction to buf - always little-endian. */
/* Store the 32-bit encoding INSN into BUF in little-endian byte order,
   independently of the host's endianness.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
5707
/* Read back a 32-bit instruction word from BUF, interpreting the bytes
   as little-endian (the inverse of put_aarch64_insn).  */
static uint32_t
get_aarch64_insn (char *buf)
{
  const unsigned char *p = (const unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];

  return insn;
}
5717
/* Emit the instruction currently held in the global "inst": write its
   4-byte little-endian encoding into the output frag, attach a fixup when
   a relocation is pending, and record the instruction for DWARF2 line
   information.  NEW_INST, if non-NULL, is stashed in the fixup's
   tc_fix_data for later processing.  */
static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve INSN_SIZE bytes in the current frag.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand kind and extra flags so
	     md_apply_fix can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
5751
5752 /* Link together opcodes of the same name. */
5753
struct templates
{
  /* One opcode entry carrying this mnemonic.  */
  const aarch64_opcode *opcode;
  /* Next entry sharing the same mnemonic, or NULL at end of chain.  */
  struct templates *next;
};

typedef struct templates templates;
5761
5762 static templates *
5763 lookup_mnemonic (const char *start, int len)
5764 {
5765 templates *templ = NULL;
5766
5767 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5768 return templ;
5769 }
5770
5771 /* Subroutine of md_assemble, responsible for looking up the primary
5772 opcode from the mnemonic the user wrote. BASE points to the beginning
5773 of the mnemonic, DOT points to the first '.' within the mnemonic
5774 (if any) and END points to the end of the mnemonic. */
5775
5776 static templates *
5777 opcode_lookup (char *base, char *dot, char *end)
5778 {
5779 const aarch64_cond *cond;
5780 char condname[16];
5781 int len;
5782
5783 if (dot == end)
5784 return 0;
5785
5786 inst.cond = COND_ALWAYS;
5787
5788 /* Handle a possible condition. */
5789 if (dot)
5790 {
5791 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5792 if (!cond)
5793 return 0;
5794 inst.cond = cond->value;
5795 len = dot - base;
5796 }
5797 else
5798 len = end - base;
5799
5800 if (inst.cond == COND_ALWAYS)
5801 {
5802 /* Look for unaffixed mnemonic. */
5803 return lookup_mnemonic (base, len);
5804 }
5805 else if (len <= 13)
5806 {
5807 /* append ".c" to mnemonic if conditional */
5808 memcpy (condname, base, len);
5809 memcpy (condname + len, ".c", 2);
5810 base = condname;
5811 len += 2;
5812 return lookup_mnemonic (base, len);
5813 }
5814
5815 return NULL;
5816 }
5817
5818 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5819 to a corresponding operand qualifier. */
5820
static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the width-dependent offset is
     added to one of these below.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predicate qualifiers: /z (zeroing) and /m (merging).  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit vector arrangements exist.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5892
5893 /* Process an optional operand that is found omitted from the assembly line.
5894 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5895 instruction's opcode entry while IDX is the index of this omitted operand.
5896 */
5897
static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element operands: the default selects the register number;
       no default index is filled in here.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* An omitted "MUL #n" scaling defaults to MUL #1.  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* No relocation is wanted for an omitted exception immediate.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* The default indexes into the barrier-option table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    /* The default indexes into the hint-option table.  */
    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5991
5992 /* Process the relocation type for move wide instructions.
5993 Return TRUE on success; otherwise return FALSE. */
5994
static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK preserves the other bits of the destination, so relocations with
     signed or PC-relative group semantics make no sense for it.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Each relocation group (G0..G3) selects a 16-bit slice of the value;
     record the corresponding shift on operand 1.  */
  switch (inst.reloc.type)
    {
    /* G0: bits [15:0], shift 0.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1: bits [31:16], shift 16.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2: bits [47:32], shift 32 — only meaningful for X registers.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* G3: bits [63:48], shift 48 — only meaningful for X registers.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6093
6094 /* A primitive log calculator. */
6095
/* A primitive log calculator.  Return log2 (SIZE) for the power-of-two
   access sizes 1, 2, 4, 8 and 16; any other input trips gas_assert and
   yields (unsigned int) -1.  */
static inline unsigned int
get_logsz (unsigned int size)
{
  /* Maps SIZE - 1 to log2 (SIZE); (unsigned char) -1 marks sizes that are
     not powers of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject 0 as well as values above 16: SIZE == 0 would otherwise read
     ls[-1], which is undefined behaviour.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
6109
6110 /* Determine and return the real reloc type code for an instruction
6111 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
6112
static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows are indexed by pseudo reloc type (offset from
     BFD_RELOC_AARCH64_LDST_LO12); columns by log2 of the access size
     (8-, 16-, 32-, 64- and 128-bit).  The TLS variants have no 128-bit
     entry, hence BFD_RELOC_AARCH64_NONE in the last column.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* Operand 1 may have been parsed without a qualifier; deduce it from
     operand 0 via the opcode's qualifier list.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* TLS rows stop at 64-bit accesses; the plain LDST row allows 128-bit.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6197
6198 /* Check whether a register list REGINFO is valid. The registers must be
6199 numbered in increasing order (modulo 32), in increments of one or two.
6200
6201 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
6202 increments of two.
6203
6204 Return FALSE if such a register list is invalid, otherwise return TRUE. */
6205
/* Check whether the packed register list REGINFO is valid: bits [1:0]
   hold the register count minus one and each successive 5-bit field holds
   a register number.  Successive registers must ascend (modulo 32) by one,
   or by two when ACCEPT_ALTERNATE is non-zero.  */
static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t nb_regs = (reginfo & 0x3) + 1;
  uint32_t stride = accept_alternate ? 2 : 1;
  uint32_t expected, i;

  reginfo >>= 2;
  expected = reginfo & 0x1f;

  for (i = 1; i < nb_regs; i++)
    {
      reginfo >>= 5;
      expected = (expected + stride) & 0x1f;
      if ((reginfo & 0x1f) != expected)
	return false;
    }

  return true;
}
6228
6229 /* Generic instruction operand parser. This does no encoding and no
6230 semantic validation; it merely squirrels values away in the inst
6231 structure. Returns TRUE or FALSE depending on whether the
6232 specified grammar matched. */
6233
6234 static bool
6235 parse_operands (char *str, const aarch64_opcode *opcode)
6236 {
6237 int i;
6238 char *backtrack_pos = 0;
6239 const enum aarch64_opnd *operands = opcode->operands;
6240 aarch64_reg_type imm_reg_type;
6241
6242 clear_error ();
6243 skip_whitespace (str);
6244
6245 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
6246 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6247 else
6248 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6249
6250 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6251 {
6252 int64_t val;
6253 const reg_entry *reg;
6254 int comma_skipped_p = 0;
6255 aarch64_reg_type rtype;
6256 struct vector_type_el vectype;
6257 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6258 aarch64_opnd_info *info = &inst.base.operands[i];
6259 aarch64_reg_type reg_type;
6260
6261 DEBUG_TRACE ("parse operand %d", i);
6262
6263 /* Assign the operand code. */
6264 info->type = operands[i];
6265
6266 if (optional_operand_p (opcode, i))
6267 {
6268 /* Remember where we are in case we need to backtrack. */
6269 gas_assert (!backtrack_pos);
6270 backtrack_pos = str;
6271 }
6272
6273 /* Expect comma between operands; the backtrack mechanism will take
6274 care of cases of omitted optional operand. */
6275 if (i > 0 && ! skip_past_char (&str, ','))
6276 {
6277 set_syntax_error (_("comma expected between operands"));
6278 goto failure;
6279 }
6280 else
6281 comma_skipped_p = 1;
6282
6283 switch (operands[i])
6284 {
6285 case AARCH64_OPND_Rd:
6286 case AARCH64_OPND_Rn:
6287 case AARCH64_OPND_Rm:
6288 case AARCH64_OPND_Rt:
6289 case AARCH64_OPND_Rt2:
6290 case AARCH64_OPND_Rs:
6291 case AARCH64_OPND_Ra:
6292 case AARCH64_OPND_Rt_LS64:
6293 case AARCH64_OPND_Rt_SYS:
6294 case AARCH64_OPND_PAIRREG:
6295 case AARCH64_OPND_SVE_Rm:
6296 po_int_reg_or_fail (REG_TYPE_R_Z);
6297
6298 /* In LS64 load/store instructions Rt register number must be even
6299 and <=22. */
6300 if (operands[i] == AARCH64_OPND_Rt_LS64)
6301 {
6302 /* We've already checked if this is valid register.
6303 This will check if register number (Rt) is not undefined for LS64
6304 instructions:
6305 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6306 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6307 {
6308 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6309 goto failure;
6310 }
6311 }
6312 break;
6313
6314 case AARCH64_OPND_Rd_SP:
6315 case AARCH64_OPND_Rn_SP:
6316 case AARCH64_OPND_Rt_SP:
6317 case AARCH64_OPND_SVE_Rn_SP:
6318 case AARCH64_OPND_Rm_SP:
6319 po_int_reg_or_fail (REG_TYPE_R_SP);
6320 break;
6321
6322 case AARCH64_OPND_Rm_EXT:
6323 case AARCH64_OPND_Rm_SFT:
6324 po_misc_or_fail (parse_shifter_operand
6325 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6326 ? SHIFTED_ARITH_IMM
6327 : SHIFTED_LOGIC_IMM)));
6328 if (!info->shifter.operator_present)
6329 {
6330 /* Default to LSL if not present. Libopcodes prefers shifter
6331 kind to be explicit. */
6332 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6333 info->shifter.kind = AARCH64_MOD_LSL;
6334 /* For Rm_EXT, libopcodes will carry out further check on whether
6335 or not stack pointer is used in the instruction (Recall that
6336 "the extend operator is not optional unless at least one of
6337 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6338 }
6339 break;
6340
6341 case AARCH64_OPND_Fd:
6342 case AARCH64_OPND_Fn:
6343 case AARCH64_OPND_Fm:
6344 case AARCH64_OPND_Fa:
6345 case AARCH64_OPND_Ft:
6346 case AARCH64_OPND_Ft2:
6347 case AARCH64_OPND_Sd:
6348 case AARCH64_OPND_Sn:
6349 case AARCH64_OPND_Sm:
6350 case AARCH64_OPND_SVE_VZn:
6351 case AARCH64_OPND_SVE_Vd:
6352 case AARCH64_OPND_SVE_Vm:
6353 case AARCH64_OPND_SVE_Vn:
6354 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
6355 if (val == PARSE_FAIL)
6356 {
6357 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6358 goto failure;
6359 }
6360 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
6361
6362 info->reg.regno = val;
6363 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
6364 break;
6365
6366 case AARCH64_OPND_SVE_Pd:
6367 case AARCH64_OPND_SVE_Pg3:
6368 case AARCH64_OPND_SVE_Pg4_5:
6369 case AARCH64_OPND_SVE_Pg4_10:
6370 case AARCH64_OPND_SVE_Pg4_16:
6371 case AARCH64_OPND_SVE_Pm:
6372 case AARCH64_OPND_SVE_Pn:
6373 case AARCH64_OPND_SVE_Pt:
6374 case AARCH64_OPND_SME_Pm:
6375 reg_type = REG_TYPE_PN;
6376 goto vector_reg;
6377
6378 case AARCH64_OPND_SVE_Za_5:
6379 case AARCH64_OPND_SVE_Za_16:
6380 case AARCH64_OPND_SVE_Zd:
6381 case AARCH64_OPND_SVE_Zm_5:
6382 case AARCH64_OPND_SVE_Zm_16:
6383 case AARCH64_OPND_SVE_Zn:
6384 case AARCH64_OPND_SVE_Zt:
6385 reg_type = REG_TYPE_ZN;
6386 goto vector_reg;
6387
6388 case AARCH64_OPND_Va:
6389 case AARCH64_OPND_Vd:
6390 case AARCH64_OPND_Vn:
6391 case AARCH64_OPND_Vm:
6392 reg_type = REG_TYPE_VN;
6393 vector_reg:
6394 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6395 if (val == PARSE_FAIL)
6396 {
6397 first_error (_(get_reg_expected_msg (reg_type)));
6398 goto failure;
6399 }
6400 if (vectype.defined & NTA_HASINDEX)
6401 goto failure;
6402
6403 info->reg.regno = val;
6404 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6405 && vectype.type == NT_invtype)
6406 /* Unqualified Pn and Zn registers are allowed in certain
6407 contexts. Rely on F_STRICT qualifier checking to catch
6408 invalid uses. */
6409 info->qualifier = AARCH64_OPND_QLF_NIL;
6410 else
6411 {
6412 info->qualifier = vectype_to_qualifier (&vectype);
6413 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6414 goto failure;
6415 }
6416 break;
6417
6418 case AARCH64_OPND_VdD1:
6419 case AARCH64_OPND_VnD1:
6420 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
6421 if (val == PARSE_FAIL)
6422 {
6423 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6424 goto failure;
6425 }
6426 if (vectype.type != NT_d || vectype.index != 1)
6427 {
6428 set_fatal_syntax_error
6429 (_("the top half of a 128-bit FP/SIMD register is expected"));
6430 goto failure;
6431 }
6432 info->reg.regno = val;
6433 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6434 here; it is correct for the purpose of encoding/decoding since
6435 only the register number is explicitly encoded in the related
6436 instructions, although this appears a bit hacky. */
6437 info->qualifier = AARCH64_OPND_QLF_S_D;
6438 break;
6439
6440 case AARCH64_OPND_SVE_Zm3_INDEX:
6441 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6442 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6443 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6444 case AARCH64_OPND_SVE_Zm4_INDEX:
6445 case AARCH64_OPND_SVE_Zn_INDEX:
6446 reg_type = REG_TYPE_ZN;
6447 goto vector_reg_index;
6448
6449 case AARCH64_OPND_Ed:
6450 case AARCH64_OPND_En:
6451 case AARCH64_OPND_Em:
6452 case AARCH64_OPND_Em16:
6453 case AARCH64_OPND_SM3_IMM2:
6454 reg_type = REG_TYPE_VN;
6455 vector_reg_index:
6456 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6457 if (val == PARSE_FAIL)
6458 {
6459 first_error (_(get_reg_expected_msg (reg_type)));
6460 goto failure;
6461 }
6462 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6463 goto failure;
6464
6465 info->reglane.regno = val;
6466 info->reglane.index = vectype.index;
6467 info->qualifier = vectype_to_qualifier (&vectype);
6468 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6469 goto failure;
6470 break;
6471
6472 case AARCH64_OPND_SVE_ZnxN:
6473 case AARCH64_OPND_SVE_ZtxN:
6474 reg_type = REG_TYPE_ZN;
6475 goto vector_reg_list;
6476
6477 case AARCH64_OPND_LVn:
6478 case AARCH64_OPND_LVt:
6479 case AARCH64_OPND_LVt_AL:
6480 case AARCH64_OPND_LEt:
6481 reg_type = REG_TYPE_VN;
6482 vector_reg_list:
6483 if (reg_type == REG_TYPE_ZN
6484 && get_opcode_dependent_value (opcode) == 1
6485 && *str != '{')
6486 {
6487 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6488 if (val == PARSE_FAIL)
6489 {
6490 first_error (_(get_reg_expected_msg (reg_type)));
6491 goto failure;
6492 }
6493 info->reglist.first_regno = val;
6494 info->reglist.num_regs = 1;
6495 }
6496 else
6497 {
6498 val = parse_vector_reg_list (&str, reg_type, &vectype);
6499 if (val == PARSE_FAIL)
6500 goto failure;
6501
6502 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6503 {
6504 set_fatal_syntax_error (_("invalid register list"));
6505 goto failure;
6506 }
6507
6508 if (vectype.width != 0 && *str != ',')
6509 {
6510 set_fatal_syntax_error
6511 (_("expected element type rather than vector type"));
6512 goto failure;
6513 }
6514
6515 info->reglist.first_regno = (val >> 2) & 0x1f;
6516 info->reglist.num_regs = (val & 0x3) + 1;
6517 }
6518 if (operands[i] == AARCH64_OPND_LEt)
6519 {
6520 if (!(vectype.defined & NTA_HASINDEX))
6521 goto failure;
6522 info->reglist.has_index = 1;
6523 info->reglist.index = vectype.index;
6524 }
6525 else
6526 {
6527 if (vectype.defined & NTA_HASINDEX)
6528 goto failure;
6529 if (!(vectype.defined & NTA_HASTYPE))
6530 {
6531 if (reg_type == REG_TYPE_ZN)
6532 set_fatal_syntax_error (_("missing type suffix"));
6533 goto failure;
6534 }
6535 }
6536 info->qualifier = vectype_to_qualifier (&vectype);
6537 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6538 goto failure;
6539 break;
6540
6541 case AARCH64_OPND_CRn:
6542 case AARCH64_OPND_CRm:
6543 {
6544 char prefix = *(str++);
6545 if (prefix != 'c' && prefix != 'C')
6546 goto failure;
6547
6548 po_imm_nc_or_fail ();
6549 if (val > 15)
6550 {
6551 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6552 goto failure;
6553 }
6554 info->qualifier = AARCH64_OPND_QLF_CR;
6555 info->imm.value = val;
6556 break;
6557 }
6558
6559 case AARCH64_OPND_SHLL_IMM:
6560 case AARCH64_OPND_IMM_VLSR:
6561 po_imm_or_fail (1, 64);
6562 info->imm.value = val;
6563 break;
6564
6565 case AARCH64_OPND_CCMP_IMM:
6566 case AARCH64_OPND_SIMM5:
6567 case AARCH64_OPND_FBITS:
6568 case AARCH64_OPND_TME_UIMM16:
6569 case AARCH64_OPND_UIMM4:
6570 case AARCH64_OPND_UIMM4_ADDG:
6571 case AARCH64_OPND_UIMM10:
6572 case AARCH64_OPND_UIMM3_OP1:
6573 case AARCH64_OPND_UIMM3_OP2:
6574 case AARCH64_OPND_IMM_VLSL:
6575 case AARCH64_OPND_IMM:
6576 case AARCH64_OPND_IMM_2:
6577 case AARCH64_OPND_WIDTH:
6578 case AARCH64_OPND_SVE_INV_LIMM:
6579 case AARCH64_OPND_SVE_LIMM:
6580 case AARCH64_OPND_SVE_LIMM_MOV:
6581 case AARCH64_OPND_SVE_SHLIMM_PRED:
6582 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6583 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6584 case AARCH64_OPND_SVE_SHRIMM_PRED:
6585 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6586 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6587 case AARCH64_OPND_SVE_SIMM5:
6588 case AARCH64_OPND_SVE_SIMM5B:
6589 case AARCH64_OPND_SVE_SIMM6:
6590 case AARCH64_OPND_SVE_SIMM8:
6591 case AARCH64_OPND_SVE_UIMM3:
6592 case AARCH64_OPND_SVE_UIMM7:
6593 case AARCH64_OPND_SVE_UIMM8:
6594 case AARCH64_OPND_SVE_UIMM8_53:
6595 case AARCH64_OPND_IMM_ROT1:
6596 case AARCH64_OPND_IMM_ROT2:
6597 case AARCH64_OPND_IMM_ROT3:
6598 case AARCH64_OPND_SVE_IMM_ROT1:
6599 case AARCH64_OPND_SVE_IMM_ROT2:
6600 case AARCH64_OPND_SVE_IMM_ROT3:
6601 po_imm_nc_or_fail ();
6602 info->imm.value = val;
6603 break;
6604
6605 case AARCH64_OPND_SVE_AIMM:
6606 case AARCH64_OPND_SVE_ASIMM:
6607 po_imm_nc_or_fail ();
6608 info->imm.value = val;
6609 skip_whitespace (str);
6610 if (skip_past_comma (&str))
6611 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6612 else
6613 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6614 break;
6615
6616 case AARCH64_OPND_SVE_PATTERN:
6617 po_enum_or_fail (aarch64_sve_pattern_array);
6618 info->imm.value = val;
6619 break;
6620
6621 case AARCH64_OPND_SVE_PATTERN_SCALED:
6622 po_enum_or_fail (aarch64_sve_pattern_array);
6623 info->imm.value = val;
6624 if (skip_past_comma (&str)
6625 && !parse_shift (&str, info, SHIFTED_MUL))
6626 goto failure;
6627 if (!info->shifter.operator_present)
6628 {
6629 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6630 info->shifter.kind = AARCH64_MOD_MUL;
6631 info->shifter.amount = 1;
6632 }
6633 break;
6634
6635 case AARCH64_OPND_SVE_PRFOP:
6636 po_enum_or_fail (aarch64_sve_prfop_array);
6637 info->imm.value = val;
6638 break;
6639
6640 case AARCH64_OPND_UIMM7:
6641 po_imm_or_fail (0, 127);
6642 info->imm.value = val;
6643 break;
6644
6645 case AARCH64_OPND_IDX:
6646 case AARCH64_OPND_MASK:
6647 case AARCH64_OPND_BIT_NUM:
6648 case AARCH64_OPND_IMMR:
6649 case AARCH64_OPND_IMMS:
6650 po_imm_or_fail (0, 63);
6651 info->imm.value = val;
6652 break;
6653
6654 case AARCH64_OPND_IMM0:
6655 po_imm_nc_or_fail ();
6656 if (val != 0)
6657 {
6658 set_fatal_syntax_error (_("immediate zero expected"));
6659 goto failure;
6660 }
6661 info->imm.value = 0;
6662 break;
6663
6664 case AARCH64_OPND_FPIMM0:
6665 {
6666 int qfloat;
6667 bool res1 = false, res2 = false;
6668 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6669 it is probably not worth the effort to support it. */
6670 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6671 imm_reg_type))
6672 && (error_p ()
6673 || !(res2 = parse_constant_immediate (&str, &val,
6674 imm_reg_type))))
6675 goto failure;
6676 if ((res1 && qfloat == 0) || (res2 && val == 0))
6677 {
6678 info->imm.value = 0;
6679 info->imm.is_fp = 1;
6680 break;
6681 }
6682 set_fatal_syntax_error (_("immediate zero expected"));
6683 goto failure;
6684 }
6685
6686 case AARCH64_OPND_IMM_MOV:
6687 {
6688 char *saved = str;
6689 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6690 reg_name_p (str, REG_TYPE_VN))
6691 goto failure;
6692 str = saved;
6693 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6694 GE_OPT_PREFIX, REJECT_ABSENT,
6695 NORMAL_RESOLUTION));
6696 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6697 later. fix_mov_imm_insn will try to determine a machine
6698 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6699 message if the immediate cannot be moved by a single
6700 instruction. */
6701 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6702 inst.base.operands[i].skip = 1;
6703 }
6704 break;
6705
6706 case AARCH64_OPND_SIMD_IMM:
6707 case AARCH64_OPND_SIMD_IMM_SFT:
6708 if (! parse_big_immediate (&str, &val, imm_reg_type))
6709 goto failure;
6710 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6711 /* addr_off_p */ 0,
6712 /* need_libopcodes_p */ 1,
6713 /* skip_p */ 1);
6714 /* Parse shift.
6715 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6716 shift, we don't check it here; we leave the checking to
6717 the libopcodes (operand_general_constraint_met_p). By
6718 doing this, we achieve better diagnostics. */
6719 if (skip_past_comma (&str)
6720 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6721 goto failure;
6722 if (!info->shifter.operator_present
6723 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6724 {
6725 /* Default to LSL if not present. Libopcodes prefers shifter
6726 kind to be explicit. */
6727 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6728 info->shifter.kind = AARCH64_MOD_LSL;
6729 }
6730 break;
6731
6732 case AARCH64_OPND_FPIMM:
6733 case AARCH64_OPND_SIMD_FPIMM:
6734 case AARCH64_OPND_SVE_FPIMM8:
6735 {
6736 int qfloat;
6737 bool dp_p;
6738
6739 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6740 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6741 || !aarch64_imm_float_p (qfloat))
6742 {
6743 if (!error_p ())
6744 set_fatal_syntax_error (_("invalid floating-point"
6745 " constant"));
6746 goto failure;
6747 }
6748 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6749 inst.base.operands[i].imm.is_fp = 1;
6750 }
6751 break;
6752
6753 case AARCH64_OPND_SVE_I1_HALF_ONE:
6754 case AARCH64_OPND_SVE_I1_HALF_TWO:
6755 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6756 {
6757 int qfloat;
6758 bool dp_p;
6759
6760 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6761 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6762 {
6763 if (!error_p ())
6764 set_fatal_syntax_error (_("invalid floating-point"
6765 " constant"));
6766 goto failure;
6767 }
6768 inst.base.operands[i].imm.value = qfloat;
6769 inst.base.operands[i].imm.is_fp = 1;
6770 }
6771 break;
6772
6773 case AARCH64_OPND_LIMM:
6774 po_misc_or_fail (parse_shifter_operand (&str, info,
6775 SHIFTED_LOGIC_IMM));
6776 if (info->shifter.operator_present)
6777 {
6778 set_fatal_syntax_error
6779 (_("shift not allowed for bitmask immediate"));
6780 goto failure;
6781 }
6782 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6783 /* addr_off_p */ 0,
6784 /* need_libopcodes_p */ 1,
6785 /* skip_p */ 1);
6786 break;
6787
6788 case AARCH64_OPND_AIMM:
6789 if (opcode->op == OP_ADD)
6790 /* ADD may have relocation types. */
6791 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6792 SHIFTED_ARITH_IMM));
6793 else
6794 po_misc_or_fail (parse_shifter_operand (&str, info,
6795 SHIFTED_ARITH_IMM));
6796 switch (inst.reloc.type)
6797 {
6798 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6799 info->shifter.amount = 12;
6800 break;
6801 case BFD_RELOC_UNUSED:
6802 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6803 if (info->shifter.kind != AARCH64_MOD_NONE)
6804 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6805 inst.reloc.pc_rel = 0;
6806 break;
6807 default:
6808 break;
6809 }
6810 info->imm.value = 0;
6811 if (!info->shifter.operator_present)
6812 {
6813 /* Default to LSL if not present. Libopcodes prefers shifter
6814 kind to be explicit. */
6815 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6816 info->shifter.kind = AARCH64_MOD_LSL;
6817 }
6818 break;
6819
6820 case AARCH64_OPND_HALF:
6821 {
6822 /* #<imm16> or relocation. */
6823 int internal_fixup_p;
6824 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6825 if (internal_fixup_p)
6826 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6827 skip_whitespace (str);
6828 if (skip_past_comma (&str))
6829 {
6830 /* {, LSL #<shift>} */
6831 if (! aarch64_gas_internal_fixup_p ())
6832 {
6833 set_fatal_syntax_error (_("can't mix relocation modifier "
6834 "with explicit shift"));
6835 goto failure;
6836 }
6837 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6838 }
6839 else
6840 inst.base.operands[i].shifter.amount = 0;
6841 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6842 inst.base.operands[i].imm.value = 0;
6843 if (! process_movw_reloc_info ())
6844 goto failure;
6845 }
6846 break;
6847
6848 case AARCH64_OPND_EXCEPTION:
6849 case AARCH64_OPND_UNDEFINED:
6850 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6851 imm_reg_type));
6852 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6853 /* addr_off_p */ 0,
6854 /* need_libopcodes_p */ 0,
6855 /* skip_p */ 1);
6856 break;
6857
6858 case AARCH64_OPND_NZCV:
6859 {
6860 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6861 if (nzcv != NULL)
6862 {
6863 str += 4;
6864 info->imm.value = nzcv->value;
6865 break;
6866 }
6867 po_imm_or_fail (0, 15);
6868 info->imm.value = val;
6869 }
6870 break;
6871
6872 case AARCH64_OPND_COND:
6873 case AARCH64_OPND_COND1:
6874 {
6875 char *start = str;
6876 do
6877 str++;
6878 while (ISALPHA (*str));
6879 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6880 if (info->cond == NULL)
6881 {
6882 set_syntax_error (_("invalid condition"));
6883 goto failure;
6884 }
6885 else if (operands[i] == AARCH64_OPND_COND1
6886 && (info->cond->value & 0xe) == 0xe)
6887 {
6888 /* Do not allow AL or NV. */
6889 set_default_error ();
6890 goto failure;
6891 }
6892 }
6893 break;
6894
6895 case AARCH64_OPND_ADDR_ADRP:
6896 po_misc_or_fail (parse_adrp (&str));
6897 /* Clear the value as operand needs to be relocated. */
6898 info->imm.value = 0;
6899 break;
6900
6901 case AARCH64_OPND_ADDR_PCREL14:
6902 case AARCH64_OPND_ADDR_PCREL19:
6903 case AARCH64_OPND_ADDR_PCREL21:
6904 case AARCH64_OPND_ADDR_PCREL26:
6905 po_misc_or_fail (parse_address (&str, info));
6906 if (!info->addr.pcrel)
6907 {
6908 set_syntax_error (_("invalid pc-relative address"));
6909 goto failure;
6910 }
6911 if (inst.gen_lit_pool
6912 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6913 {
6914 /* Only permit "=value" in the literal load instructions.
6915 The literal will be generated by programmer_friendly_fixup. */
6916 set_syntax_error (_("invalid use of \"=immediate\""));
6917 goto failure;
6918 }
6919 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6920 {
6921 set_syntax_error (_("unrecognized relocation suffix"));
6922 goto failure;
6923 }
6924 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6925 {
6926 info->imm.value = inst.reloc.exp.X_add_number;
6927 inst.reloc.type = BFD_RELOC_UNUSED;
6928 }
6929 else
6930 {
6931 info->imm.value = 0;
6932 if (inst.reloc.type == BFD_RELOC_UNUSED)
6933 switch (opcode->iclass)
6934 {
6935 case compbranch:
6936 case condbranch:
6937 /* e.g. CBZ or B.COND */
6938 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6939 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6940 break;
6941 case testbranch:
6942 /* e.g. TBZ */
6943 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6944 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6945 break;
6946 case branch_imm:
6947 /* e.g. B or BL */
6948 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6949 inst.reloc.type =
6950 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6951 : BFD_RELOC_AARCH64_JUMP26;
6952 break;
6953 case loadlit:
6954 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6955 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6956 break;
6957 case pcreladdr:
6958 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6959 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6960 break;
6961 default:
6962 gas_assert (0);
6963 abort ();
6964 }
6965 inst.reloc.pc_rel = 1;
6966 }
6967 break;
6968
6969 case AARCH64_OPND_ADDR_SIMPLE:
6970 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6971 {
6972 /* [<Xn|SP>{, #<simm>}] */
6973 char *start = str;
6974 /* First use the normal address-parsing routines, to get
6975 the usual syntax errors. */
6976 po_misc_or_fail (parse_address (&str, info));
6977 if (info->addr.pcrel || info->addr.offset.is_reg
6978 || !info->addr.preind || info->addr.postind
6979 || info->addr.writeback)
6980 {
6981 set_syntax_error (_("invalid addressing mode"));
6982 goto failure;
6983 }
6984
6985 /* Then retry, matching the specific syntax of these addresses. */
6986 str = start;
6987 po_char_or_fail ('[');
6988 po_reg_or_fail (REG_TYPE_R64_SP);
6989 /* Accept optional ", #0". */
6990 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6991 && skip_past_char (&str, ','))
6992 {
6993 skip_past_char (&str, '#');
6994 if (! skip_past_char (&str, '0'))
6995 {
6996 set_fatal_syntax_error
6997 (_("the optional immediate offset can only be 0"));
6998 goto failure;
6999 }
7000 }
7001 po_char_or_fail (']');
7002 break;
7003 }
7004
7005 case AARCH64_OPND_ADDR_REGOFF:
7006 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7007 po_misc_or_fail (parse_address (&str, info));
7008 regoff_addr:
7009 if (info->addr.pcrel || !info->addr.offset.is_reg
7010 || !info->addr.preind || info->addr.postind
7011 || info->addr.writeback)
7012 {
7013 set_syntax_error (_("invalid addressing mode"));
7014 goto failure;
7015 }
7016 if (!info->shifter.operator_present)
7017 {
7018 /* Default to LSL if not present. Libopcodes prefers shifter
7019 kind to be explicit. */
7020 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7021 info->shifter.kind = AARCH64_MOD_LSL;
7022 }
7023 /* Qualifier to be deduced by libopcodes. */
7024 break;
7025
7026 case AARCH64_OPND_ADDR_SIMM7:
7027 po_misc_or_fail (parse_address (&str, info));
7028 if (info->addr.pcrel || info->addr.offset.is_reg
7029 || (!info->addr.preind && !info->addr.postind))
7030 {
7031 set_syntax_error (_("invalid addressing mode"));
7032 goto failure;
7033 }
7034 if (inst.reloc.type != BFD_RELOC_UNUSED)
7035 {
7036 set_syntax_error (_("relocation not allowed"));
7037 goto failure;
7038 }
7039 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7040 /* addr_off_p */ 1,
7041 /* need_libopcodes_p */ 1,
7042 /* skip_p */ 0);
7043 break;
7044
7045 case AARCH64_OPND_ADDR_SIMM9:
7046 case AARCH64_OPND_ADDR_SIMM9_2:
7047 case AARCH64_OPND_ADDR_SIMM11:
7048 case AARCH64_OPND_ADDR_SIMM13:
7049 po_misc_or_fail (parse_address (&str, info));
7050 if (info->addr.pcrel || info->addr.offset.is_reg
7051 || (!info->addr.preind && !info->addr.postind)
7052 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7053 && info->addr.writeback))
7054 {
7055 set_syntax_error (_("invalid addressing mode"));
7056 goto failure;
7057 }
7058 if (inst.reloc.type != BFD_RELOC_UNUSED)
7059 {
7060 set_syntax_error (_("relocation not allowed"));
7061 goto failure;
7062 }
7063 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7064 /* addr_off_p */ 1,
7065 /* need_libopcodes_p */ 1,
7066 /* skip_p */ 0);
7067 break;
7068
7069 case AARCH64_OPND_ADDR_SIMM10:
7070 case AARCH64_OPND_ADDR_OFFSET:
7071 po_misc_or_fail (parse_address (&str, info));
7072 if (info->addr.pcrel || info->addr.offset.is_reg
7073 || !info->addr.preind || info->addr.postind)
7074 {
7075 set_syntax_error (_("invalid addressing mode"));
7076 goto failure;
7077 }
7078 if (inst.reloc.type != BFD_RELOC_UNUSED)
7079 {
7080 set_syntax_error (_("relocation not allowed"));
7081 goto failure;
7082 }
7083 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7084 /* addr_off_p */ 1,
7085 /* need_libopcodes_p */ 1,
7086 /* skip_p */ 0);
7087 break;
7088
7089 case AARCH64_OPND_ADDR_UIMM12:
7090 po_misc_or_fail (parse_address (&str, info));
7091 if (info->addr.pcrel || info->addr.offset.is_reg
7092 || !info->addr.preind || info->addr.writeback)
7093 {
7094 set_syntax_error (_("invalid addressing mode"));
7095 goto failure;
7096 }
7097 if (inst.reloc.type == BFD_RELOC_UNUSED)
7098 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7099 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7100 || (inst.reloc.type
7101 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7102 || (inst.reloc.type
7103 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7104 || (inst.reloc.type
7105 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7106 || (inst.reloc.type
7107 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7108 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7109 /* Leave qualifier to be determined by libopcodes. */
7110 break;
7111
7112 case AARCH64_OPND_SIMD_ADDR_POST:
7113 /* [<Xn|SP>], <Xm|#<amount>> */
7114 po_misc_or_fail (parse_address (&str, info));
7115 if (!info->addr.postind || !info->addr.writeback)
7116 {
7117 set_syntax_error (_("invalid addressing mode"));
7118 goto failure;
7119 }
7120 if (!info->addr.offset.is_reg)
7121 {
7122 if (inst.reloc.exp.X_op == O_constant)
7123 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7124 else
7125 {
7126 set_fatal_syntax_error
7127 (_("writeback value must be an immediate constant"));
7128 goto failure;
7129 }
7130 }
7131 /* No qualifier. */
7132 break;
7133
7134 case AARCH64_OPND_SME_SM_ZA:
7135 /* { SM | ZA } */
7136 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7137 {
7138 set_syntax_error (_("unknown or missing PSTATE field name"));
7139 goto failure;
7140 }
7141 info->reg.regno = val;
7142 break;
7143
7144 case AARCH64_OPND_SME_PnT_Wm_imm:
7145 /* <Pn>.<T>[<Wm>, #<imm>] */
7146 {
7147 int index_base_reg;
7148 int imm;
7149 val = parse_sme_pred_reg_with_index (&str,
7150 &index_base_reg,
7151 &imm,
7152 &qualifier);
7153 if (val == PARSE_FAIL)
7154 goto failure;
7155
7156 info->za_tile_vector.regno = val;
7157 info->za_tile_vector.index.regno = index_base_reg;
7158 info->za_tile_vector.index.imm = imm;
7159 info->qualifier = qualifier;
7160 break;
7161 }
7162
7163 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7164 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7165 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7166 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7167 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7168 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7169 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7170 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7171 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7172 case AARCH64_OPND_SVE_ADDR_RI_U6:
7173 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7174 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7175 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7176 /* [X<n>{, #imm, MUL VL}]
7177 [X<n>{, #imm}]
7178 but recognizing SVE registers. */
7179 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7180 &offset_qualifier));
7181 if (base_qualifier != AARCH64_OPND_QLF_X)
7182 {
7183 set_syntax_error (_("invalid addressing mode"));
7184 goto failure;
7185 }
7186 sve_regimm:
7187 if (info->addr.pcrel || info->addr.offset.is_reg
7188 || !info->addr.preind || info->addr.writeback)
7189 {
7190 set_syntax_error (_("invalid addressing mode"));
7191 goto failure;
7192 }
7193 if (inst.reloc.type != BFD_RELOC_UNUSED
7194 || inst.reloc.exp.X_op != O_constant)
7195 {
7196 /* Make sure this has priority over
7197 "invalid addressing mode". */
7198 set_fatal_syntax_error (_("constant offset required"));
7199 goto failure;
7200 }
7201 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7202 break;
7203
7204 case AARCH64_OPND_SVE_ADDR_R:
7205 /* [<Xn|SP>{, <R><m>}]
7206 but recognizing SVE registers. */
7207 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7208 &offset_qualifier));
7209 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7210 {
7211 offset_qualifier = AARCH64_OPND_QLF_X;
7212 info->addr.offset.is_reg = 1;
7213 info->addr.offset.regno = 31;
7214 }
7215 else if (base_qualifier != AARCH64_OPND_QLF_X
7216 || offset_qualifier != AARCH64_OPND_QLF_X)
7217 {
7218 set_syntax_error (_("invalid addressing mode"));
7219 goto failure;
7220 }
7221 goto regoff_addr;
7222
7223 case AARCH64_OPND_SVE_ADDR_RR:
7224 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7225 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7226 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7227 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7228 case AARCH64_OPND_SVE_ADDR_RX:
7229 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7230 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7231 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7232 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7233 but recognizing SVE registers. */
7234 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7235 &offset_qualifier));
7236 if (base_qualifier != AARCH64_OPND_QLF_X
7237 || offset_qualifier != AARCH64_OPND_QLF_X)
7238 {
7239 set_syntax_error (_("invalid addressing mode"));
7240 goto failure;
7241 }
7242 goto regoff_addr;
7243
7244 case AARCH64_OPND_SVE_ADDR_RZ:
7245 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7246 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7247 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7248 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7249 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7250 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7251 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7252 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7253 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7254 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7255 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7256 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7257 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7258 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7259 &offset_qualifier));
7260 if (base_qualifier != AARCH64_OPND_QLF_X
7261 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7262 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7263 {
7264 set_syntax_error (_("invalid addressing mode"));
7265 goto failure;
7266 }
7267 info->qualifier = offset_qualifier;
7268 goto regoff_addr;
7269
7270 case AARCH64_OPND_SVE_ADDR_ZX:
7271 /* [Zn.<T>{, <Xm>}]. */
7272 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7273 &offset_qualifier));
7274 /* Things to check:
7275 base_qualifier either S_S or S_D
7276 offset_qualifier must be X
7277 */
7278 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7279 && base_qualifier != AARCH64_OPND_QLF_S_D)
7280 || offset_qualifier != AARCH64_OPND_QLF_X)
7281 {
7282 set_syntax_error (_("invalid addressing mode"));
7283 goto failure;
7284 }
7285 info->qualifier = base_qualifier;
7286 if (!info->addr.offset.is_reg || info->addr.pcrel
7287 || !info->addr.preind || info->addr.writeback
7288 || info->shifter.operator_present != 0)
7289 {
7290 set_syntax_error (_("invalid addressing mode"));
7291 goto failure;
7292 }
7293 info->shifter.kind = AARCH64_MOD_LSL;
7294 break;
7295
7296
7297 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7298 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7299 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7300 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7301 /* [Z<n>.<T>{, #imm}] */
7302 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7303 &offset_qualifier));
7304 if (base_qualifier != AARCH64_OPND_QLF_S_S
7305 && base_qualifier != AARCH64_OPND_QLF_S_D)
7306 {
7307 set_syntax_error (_("invalid addressing mode"));
7308 goto failure;
7309 }
7310 info->qualifier = base_qualifier;
7311 goto sve_regimm;
7312
7313 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7314 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7315 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7316 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7317 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7318
7319 We don't reject:
7320
7321 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7322
7323 here since we get better error messages by leaving it to
7324 the qualifier checking routines. */
7325 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7326 &offset_qualifier));
7327 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7328 && base_qualifier != AARCH64_OPND_QLF_S_D)
7329 || offset_qualifier != base_qualifier)
7330 {
7331 set_syntax_error (_("invalid addressing mode"));
7332 goto failure;
7333 }
7334 info->qualifier = base_qualifier;
7335 goto regoff_addr;
7336
7337 case AARCH64_OPND_SYSREG:
7338 {
7339 uint32_t sysreg_flags;
7340 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7341 &sysreg_flags)) == PARSE_FAIL)
7342 {
7343 set_syntax_error (_("unknown or missing system register name"));
7344 goto failure;
7345 }
7346 inst.base.operands[i].sysreg.value = val;
7347 inst.base.operands[i].sysreg.flags = sysreg_flags;
7348 break;
7349 }
7350
7351 case AARCH64_OPND_PSTATEFIELD:
7352 {
7353 uint32_t sysreg_flags;
7354 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7355 &sysreg_flags)) == PARSE_FAIL)
7356 {
7357 set_syntax_error (_("unknown or missing PSTATE field name"));
7358 goto failure;
7359 }
7360 inst.base.operands[i].pstatefield = val;
7361 inst.base.operands[i].sysreg.flags = sysreg_flags;
7362 break;
7363 }
7364
7365 case AARCH64_OPND_SYSREG_IC:
7366 inst.base.operands[i].sysins_op =
7367 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7368 goto sys_reg_ins;
7369
7370 case AARCH64_OPND_SYSREG_DC:
7371 inst.base.operands[i].sysins_op =
7372 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7373 goto sys_reg_ins;
7374
7375 case AARCH64_OPND_SYSREG_AT:
7376 inst.base.operands[i].sysins_op =
7377 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7378 goto sys_reg_ins;
7379
7380 case AARCH64_OPND_SYSREG_SR:
7381 inst.base.operands[i].sysins_op =
7382 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7383 goto sys_reg_ins;
7384
7385 case AARCH64_OPND_SYSREG_TLBI:
7386 inst.base.operands[i].sysins_op =
7387 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7388 sys_reg_ins:
7389 if (inst.base.operands[i].sysins_op == NULL)
7390 {
7391 set_fatal_syntax_error ( _("unknown or missing operation name"));
7392 goto failure;
7393 }
7394 break;
7395
7396 case AARCH64_OPND_BARRIER:
7397 case AARCH64_OPND_BARRIER_ISB:
7398 val = parse_barrier (&str);
7399 if (val != PARSE_FAIL
7400 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7401 {
7402 /* ISB only accepts options name 'sy'. */
7403 set_syntax_error
7404 (_("the specified option is not accepted in ISB"));
7405 /* Turn off backtrack as this optional operand is present. */
7406 backtrack_pos = 0;
7407 goto failure;
7408 }
7409 if (val != PARSE_FAIL
7410 && operands[i] == AARCH64_OPND_BARRIER)
7411 {
7412 /* Regular barriers accept options CRm (C0-C15).
7413 DSB nXS barrier variant accepts values > 15. */
7414 if (val < 0 || val > 15)
7415 {
7416 set_syntax_error (_("the specified option is not accepted in DSB"));
7417 goto failure;
7418 }
7419 }
7420 /* This is an extension to accept a 0..15 immediate. */
7421 if (val == PARSE_FAIL)
7422 po_imm_or_fail (0, 15);
7423 info->barrier = aarch64_barrier_options + val;
7424 break;
7425
7426 case AARCH64_OPND_BARRIER_DSB_NXS:
7427 val = parse_barrier (&str);
7428 if (val != PARSE_FAIL)
7429 {
7430 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7431 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7432 {
7433 set_syntax_error (_("the specified option is not accepted in DSB"));
7434 /* Turn off backtrack as this optional operand is present. */
7435 backtrack_pos = 0;
7436 goto failure;
7437 }
7438 }
7439 else
7440 {
7441 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7442 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7443 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7444 goto failure;
7445 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7446 {
7447 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7448 goto failure;
7449 }
7450 }
7451 /* Option index is encoded as 2-bit value in val<3:2>. */
7452 val = (val >> 2) - 4;
7453 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7454 break;
7455
7456 case AARCH64_OPND_PRFOP:
7457 val = parse_pldop (&str);
7458 /* This is an extension to accept a 0..31 immediate. */
7459 if (val == PARSE_FAIL)
7460 po_imm_or_fail (0, 31);
7461 inst.base.operands[i].prfop = aarch64_prfops + val;
7462 break;
7463
7464 case AARCH64_OPND_BARRIER_PSB:
7465 val = parse_barrier_psb (&str, &(info->hint_option));
7466 if (val == PARSE_FAIL)
7467 goto failure;
7468 break;
7469
7470 case AARCH64_OPND_BTI_TARGET:
7471 val = parse_bti_operand (&str, &(info->hint_option));
7472 if (val == PARSE_FAIL)
7473 goto failure;
7474 break;
7475
7476 case AARCH64_OPND_SME_ZAda_2b:
7477 case AARCH64_OPND_SME_ZAda_3b:
7478 val = parse_sme_zada_operand (&str, &qualifier);
7479 if (val == PARSE_FAIL)
7480 goto failure;
7481 info->reg.regno = val;
7482 info->qualifier = qualifier;
7483 break;
7484
7485 case AARCH64_OPND_SME_ZA_HV_idx_src:
7486 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7487 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7488 {
7489 enum sme_hv_slice slice_indicator;
7490 int vector_select_register;
7491 int imm;
7492
7493 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7494 val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7495 &slice_indicator,
7496 &vector_select_register,
7497 &imm,
7498 &qualifier);
7499 else
7500 val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7501 &vector_select_register,
7502 &imm,
7503 &qualifier);
7504 if (val == PARSE_FAIL)
7505 goto failure;
7506 info->za_tile_vector.regno = val;
7507 info->za_tile_vector.index.regno = vector_select_register;
7508 info->za_tile_vector.index.imm = imm;
7509 info->za_tile_vector.v = slice_indicator;
7510 info->qualifier = qualifier;
7511 break;
7512 }
7513
7514 case AARCH64_OPND_SME_list_of_64bit_tiles:
7515 val = parse_sme_list_of_64bit_tiles (&str);
7516 if (val == PARSE_FAIL)
7517 goto failure;
7518 info->imm.value = val;
7519 break;
7520
7521 case AARCH64_OPND_SME_ZA_array:
7522 {
7523 int imm;
7524 val = parse_sme_za_array (&str, &imm);
7525 if (val == PARSE_FAIL)
7526 goto failure;
7527 info->za_tile_vector.index.regno = val;
7528 info->za_tile_vector.index.imm = imm;
7529 break;
7530 }
7531
7532 case AARCH64_OPND_MOPS_ADDR_Rd:
7533 case AARCH64_OPND_MOPS_ADDR_Rs:
7534 po_char_or_fail ('[');
7535 if (!parse_x0_to_x30 (&str, info))
7536 goto failure;
7537 po_char_or_fail (']');
7538 po_char_or_fail ('!');
7539 break;
7540
7541 case AARCH64_OPND_MOPS_WB_Rn:
7542 if (!parse_x0_to_x30 (&str, info))
7543 goto failure;
7544 po_char_or_fail ('!');
7545 break;
7546
7547 default:
7548 as_fatal (_("unhandled operand code %d"), operands[i]);
7549 }
7550
7551 /* If we get here, this operand was successfully parsed. */
7552 inst.base.operands[i].present = 1;
7553 continue;
7554
7555 failure:
7556 /* The parse routine should already have set the error, but in case
7557 not, set a default one here. */
7558 if (! error_p ())
7559 set_default_error ();
7560
7561 if (! backtrack_pos)
7562 goto parse_operands_return;
7563
7564 {
7565 /* We reach here because this operand is marked as optional, and
7566 either no operand was supplied or the operand was supplied but it
7567 was syntactically incorrect. In the latter case we report an
7568 error. In the former case we perform a few more checks before
7569 dropping through to the code to insert the default operand. */
7570
7571 char *tmp = backtrack_pos;
7572 char endchar = END_OF_INSN;
7573
7574 if (i != (aarch64_num_of_operands (opcode) - 1))
7575 endchar = ',';
7576 skip_past_char (&tmp, ',');
7577
7578 if (*tmp != endchar)
7579 /* The user has supplied an operand in the wrong format. */
7580 goto parse_operands_return;
7581
7582 /* Make sure there is not a comma before the optional operand.
7583 For example the fifth operand of 'sys' is optional:
7584
7585 sys #0,c0,c0,#0, <--- wrong
7586 sys #0,c0,c0,#0 <--- correct. */
7587 if (comma_skipped_p && i && endchar == END_OF_INSN)
7588 {
7589 set_fatal_syntax_error
7590 (_("unexpected comma before the omitted optional operand"));
7591 goto parse_operands_return;
7592 }
7593 }
7594
7595 /* Reaching here means we are dealing with an optional operand that is
7596 omitted from the assembly line. */
7597 gas_assert (optional_operand_p (opcode, i));
7598 info->present = 0;
7599 process_omitted_operand (operands[i], opcode, i, info);
7600
7601 /* Try again, skipping the optional operand at backtrack_pos. */
7602 str = backtrack_pos;
7603 backtrack_pos = 0;
7604
7605 /* Clear any error record after the omitted optional operand has been
7606 successfully handled. */
7607 clear_error ();
7608 }
7609
7610 /* Check if we have parsed all the operands. */
7611 if (*str != '\0' && ! error_p ())
7612 {
7613 /* Set I to the index of the last present operand; this is
7614 for the purpose of diagnostics. */
7615 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7616 ;
7617 set_fatal_syntax_error
7618 (_("unexpected characters following instruction"));
7619 }
7620
7621 parse_operands_return:
7622
7623 if (error_p ())
7624 {
7625 DEBUG_TRACE ("parsing FAIL: %s - %s",
7626 operand_mismatch_kind_names[get_error_kind ()],
7627 get_error_message ());
7628 /* Record the operand error properly; this is useful when there
7629 are multiple instruction templates for a mnemonic name, so that
7630 later on, we can select the error that most closely describes
7631 the problem. */
7632 record_operand_error (opcode, i, get_error_kind (),
7633 get_error_message ());
7634 return false;
7635 }
7636 else
7637 {
7638 DEBUG_TRACE ("parsing SUCCESS");
7639 return true;
7640 }
7641 }
7642
7643 /* It does some fix-up to provide some programmer friendly feature while
7644 keeping the libopcodes happy, i.e. libopcodes only accepts
7645 the preferred architectural syntax.
7646 Return FALSE if there is any failure; otherwise return TRUE. */
7647
7648 static bool
7649 programmer_friendly_fixup (aarch64_instruction *instr)
7650 {
7651 aarch64_inst *base = &instr->base;
7652 const aarch64_opcode *opcode = base->opcode;
7653 enum aarch64_op op = opcode->op;
7654 aarch64_opnd_info *operands = base->operands;
7655
7656 DEBUG_TRACE ("enter");
7657
7658 switch (opcode->iclass)
7659 {
7660 case testbranch:
7661 /* TBNZ Xn|Wn, #uimm6, label
7662 Test and Branch Not Zero: conditionally jumps to label if bit number
7663 uimm6 in register Xn is not zero. The bit number implies the width of
7664 the register, which may be written and should be disassembled as Wn if
7665 uimm is less than 32. */
7666 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
7667 {
7668 if (operands[1].imm.value >= 32)
7669 {
7670 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
7671 0, 31);
7672 return false;
7673 }
7674 operands[0].qualifier = AARCH64_OPND_QLF_X;
7675 }
7676 break;
7677 case loadlit:
7678 /* LDR Wt, label | =value
7679 As a convenience assemblers will typically permit the notation
7680 "=value" in conjunction with the pc-relative literal load instructions
7681 to automatically place an immediate value or symbolic address in a
7682 nearby literal pool and generate a hidden label which references it.
7683 ISREG has been set to 0 in the case of =value. */
7684 if (instr->gen_lit_pool
7685 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
7686 {
7687 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
7688 if (op == OP_LDRSW_LIT)
7689 size = 4;
7690 if (instr->reloc.exp.X_op != O_constant
7691 && instr->reloc.exp.X_op != O_big
7692 && instr->reloc.exp.X_op != O_symbol)
7693 {
7694 record_operand_error (opcode, 1,
7695 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
7696 _("constant expression expected"));
7697 return false;
7698 }
7699 if (! add_to_lit_pool (&instr->reloc.exp, size))
7700 {
7701 record_operand_error (opcode, 1,
7702 AARCH64_OPDE_OTHER_ERROR,
7703 _("literal pool insertion failed"));
7704 return false;
7705 }
7706 }
7707 break;
7708 case log_shift:
7709 case bitfield:
7710 /* UXT[BHW] Wd, Wn
7711 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
7712 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
7713 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
7714 A programmer-friendly assembler should accept a destination Xd in
7715 place of Wd, however that is not the preferred form for disassembly.
7716 */
7717 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
7718 && operands[1].qualifier == AARCH64_OPND_QLF_W
7719 && operands[0].qualifier == AARCH64_OPND_QLF_X)
7720 operands[0].qualifier = AARCH64_OPND_QLF_W;
7721 break;
7722
7723 case addsub_ext:
7724 {
7725 /* In the 64-bit form, the final register operand is written as Wm
7726 for all but the (possibly omitted) UXTX/LSL and SXTX
7727 operators.
7728 As a programmer-friendly assembler, we accept e.g.
7729 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
7730 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
7731 int idx = aarch64_operand_index (opcode->operands,
7732 AARCH64_OPND_Rm_EXT);
7733 gas_assert (idx == 1 || idx == 2);
7734 if (operands[0].qualifier == AARCH64_OPND_QLF_X
7735 && operands[idx].qualifier == AARCH64_OPND_QLF_X
7736 && operands[idx].shifter.kind != AARCH64_MOD_LSL
7737 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
7738 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
7739 operands[idx].qualifier = AARCH64_OPND_QLF_W;
7740 }
7741 break;
7742
7743 default:
7744 break;
7745 }
7746
7747 DEBUG_TRACE ("exit with SUCCESS");
7748 return true;
7749 }
7750
/* Check for loads and stores that will cause unpredictable behavior.  */

/* Scan the just-encoded instruction in INSTR and emit an as_warn for
   load/store forms whose register combinations the architecture leaves
   unpredictable.  STR is the original assembly text and is used only in
   the warning messages.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* For pair instructions the base register is operand 2 and the two
	 transfer registers are operands 0 and 1.  */
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 Bit 22 of the encoding distinguishes the load forms here.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  /* As above, bit 22 set selects the load-exclusive forms and
	     bit 21 the pair forms.  */
	  if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7846
7847 static void
7848 force_automatic_sequence_close (void)
7849 {
7850 struct aarch64_segment_info_type *tc_seg_info;
7851
7852 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7853 if (tc_seg_info->insn_sequence.instr)
7854 {
7855 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7856 _("previous `%s' sequence has not been closed"),
7857 tc_seg_info->insn_sequence.instr->opcode->name);
7858 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7859 }
7860 }
7861
7862 /* A wrapper function to interface with libopcodes on encoding and
7863 record the error message if there is any.
7864
7865 Return TRUE on success; otherwise return FALSE. */
7866
7867 static bool
7868 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7869 aarch64_insn *code)
7870 {
7871 aarch64_operand_error error_info;
7872 memset (&error_info, '\0', sizeof (error_info));
7873 error_info.kind = AARCH64_OPDE_NIL;
7874 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7875 && !error_info.non_fatal)
7876 return true;
7877
7878 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7879 record_operand_error_info (opcode, &error_info);
7880 return error_info.non_fatal;
7881 }
7882
#ifdef DEBUG_AARCH64
/* Dump the operand list of OPCODE to the debug trace, falling back to
   the operand description when an operand has no name.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  for (int i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *text = aarch64_get_operand_name (opcode->operands[i]);
      if (text[0] == '\0')
	text = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, text);
    }
}
#endif /* DEBUG_AARCH64 */
7898
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* NOTE(review): with no '.' in the mnemonic, this statement presumably
     handles a .req-style register alias definition; confirm against
     create_register_alias.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Reset the per-instruction state while preserving the condition code
     that was already derived for this mnemonic.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse the operands, apply programmer-friendly transformations and
	 try to encode; only a full success emits the instruction.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This opcode entry did not match; restore the parsing state before
	 trying the next candidate with the same mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8054
/* Various frobbings of labels and their addresses.  */

void
aarch64_start_line_hook (void)
{
  /* A new source line starts; forget the label seen on the previous one
     so md_assemble only realigns labels on the current line.  */
  last_label_seen = NULL;
}
8062
/* Record SYM as the most recently seen label and emit the corresponding
   DWARF line-table entry for it.  */

void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8070
/* Hook run when leaving a section; close any instruction sequence that
   is still open in the current segment.  */

void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
8077
8078 int
8079 aarch64_data_in_code (void)
8080 {
8081 if (startswith (input_line_pointer + 1, "data:"))
8082 {
8083 *input_line_pointer = '/';
8084 input_line_pointer += 5;
8085 *input_line_pointer = 0;
8086 return 1;
8087 }
8088
8089 return 0;
8090 }
8091
/* Strip a trailing "/data" marker from NAME in place, if present.
   Returns NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = 0;

  return name;
}
8102 \f
8103 /* Table of all register names defined by default. The user can
8104 define additional names with .req. Note that all register names
8105 should appear in both upper and lowercase variants. Some registers
8106 also have mixed-case names. */
8107
8108 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8109 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8110 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8111 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8112 #define REGSET16(p,t) \
8113 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8114 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8115 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8116 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8117 #define REGSET16S(p,s,t) \
8118 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8119 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8120 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8121 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8122 #define REGSET31(p,t) \
8123 REGSET16(p, t), \
8124 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8125 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8126 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8127 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8128 #define REGSET(p,t) \
8129 REGSET31(p,t), REGNUM(p,31,t)
8130
8131 /* These go into aarch64_reg_hsh hash-table. */
8132 static const reg_entry reg_names[] = {
8133 /* Integer registers. */
8134 REGSET31 (x, R_64), REGSET31 (X, R_64),
8135 REGSET31 (w, R_32), REGSET31 (W, R_32),
8136
8137 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
8138 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
8139 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
8140 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
8141 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
8142 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
8143
8144 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
8145 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
8146
8147 /* Floating-point single precision registers. */
8148 REGSET (s, FP_S), REGSET (S, FP_S),
8149
8150 /* Floating-point double precision registers. */
8151 REGSET (d, FP_D), REGSET (D, FP_D),
8152
8153 /* Floating-point half precision registers. */
8154 REGSET (h, FP_H), REGSET (H, FP_H),
8155
8156 /* Floating-point byte precision registers. */
8157 REGSET (b, FP_B), REGSET (B, FP_B),
8158
8159 /* Floating-point quad precision registers. */
8160 REGSET (q, FP_Q), REGSET (Q, FP_Q),
8161
8162 /* FP/SIMD registers. */
8163 REGSET (v, VN), REGSET (V, VN),
8164
8165 /* SVE vector registers. */
8166 REGSET (z, ZN), REGSET (Z, ZN),
8167
8168 /* SVE predicate registers. */
8169 REGSET16 (p, PN), REGSET16 (P, PN),
8170
8171 /* SME ZA tile registers. */
8172 REGSET16 (za, ZA), REGSET16 (ZA, ZA),
8173
8174 /* SME ZA tile registers (horizontal slice). */
8175 REGSET16S (za, h, ZAH), REGSET16S (ZA, H, ZAH),
8176
8177 /* SME ZA tile registers (vertical slice). */
8178 REGSET16S (za, v, ZAV), REGSET16S (ZA, V, ZAV)
8179 };
8180
8181 #undef REGDEF
8182 #undef REGDEF_ALIAS
8183 #undef REGNUM
8184 #undef REGSET16
8185 #undef REGSET31
8186 #undef REGSET
8187
/* Single-letter helper macros for building the table below: an upper
   case letter means the corresponding condition flag bit is 1, lower
   case means 0.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the four flag bits into a 4-bit value, N being the most
   significant bit and V the least.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* Every case-sensitive spelling of the NZCV flags operand, mapped to
   its 4-bit immediate value.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8225 \f
8226 /* MD interface: bits in the object file. */
8227
8228 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8229 for use in the a.out file, and stores them in the array pointed to by buf.
8230 This knows about the endian-ness of the target machine and does
8231 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8232 2 (short) and 4 (long) Floating numbers are put out as a series of
8233 LITTLENUMS (shorts, here at least). */
8234
8235 void
8236 md_number_to_chars (char *buf, valueT val, int n)
8237 {
8238 if (target_big_endian)
8239 number_to_chars_bigendian (buf, val, n);
8240 else
8241 number_to_chars_littleendian (buf, val, n);
8242 }
8243
8244 /* MD interface: Sections. */
8245
8246 /* Estimate the size of a frag before relaxing. Assume everything fits in
8247 4 bytes. */
8248
8249 int
8250 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
8251 {
8252 fragp->fr_var = 4;
8253 return 4;
8254 }
8255
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No target-specific padding is required; return SIZE unchanged.  */
  return size;
}
8263
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total number of padding bytes this frag must provide.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach 4-byte instruction alignment; zero-fill them
     (and, for ELF, mark them with a data mapping symbol).  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating part is a single NOP instruction.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8321
8322 /* Perform target specific initialisation of a frag.
8323 Note - despite the name this initialisation is not done when the frag
8324 is created, but only when its type is assigned. A frag can be created
8325 and used a long time before its type is set, so beware of assuming that
8326 this initialisation is performed first. */
8327
8328 #ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets; the mapping-symbol bookkeeping
     done by the ELF variant does not apply here.  */
}
8334
8335 #else /* OBJ_ELF is defined. */
8336 void
8337 aarch64_init_frag (fragS * fragP, int max_chars)
8338 {
8339 /* Record a mapping symbol for alignment frags. We will delete this
8340 later if the alignment ends up empty. */
8341 if (!fragP->tc_frag_data.recorded)
8342 fragP->tc_frag_data.recorded = 1;
8343
8344 /* PR 21809: Do not set a mapping state for debug sections
8345 - it just confuses other tools. */
8346 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
8347 return;
8348
8349 switch (fragP->fr_type)
8350 {
8351 case rs_align_test:
8352 case rs_fill:
8353 mapping_state_2 (MAP_DATA, max_chars);
8354 break;
8355 case rs_align:
8356 /* PR 20364: We can get alignment frags in code sections,
8357 so do not just assume that we should use the MAP_DATA state. */
8358 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
8359 break;
8360 case rs_align_code:
8361 mapping_state_2 (MAP_INSN, max_chars);
8362 break;
8363 default:
8364 break;
8365 }
8366 }
8367 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* On entry the CFA is the stack pointer with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8375 #endif /* OBJ_ELF */
8376
8377 /* Convert REGNAME to a DWARF-2 register number. */
8378
8379 int
8380 tc_aarch64_regname_to_dw2regnum (char *regname)
8381 {
8382 const reg_entry *reg = parse_reg (&regname);
8383 if (reg == NULL)
8384 return -1;
8385
8386 switch (reg->type)
8387 {
8388 case REG_TYPE_SP_32:
8389 case REG_TYPE_SP_64:
8390 case REG_TYPE_R_32:
8391 case REG_TYPE_R_64:
8392 return reg->number;
8393
8394 case REG_TYPE_FP_B:
8395 case REG_TYPE_FP_H:
8396 case REG_TYPE_FP_S:
8397 case REG_TYPE_FP_D:
8398 case REG_TYPE_FP_Q:
8399 return reg->number + 64;
8400
8401 default:
8402 break;
8403 }
8404 return -1;
8405 }
8406
/* Implement DWARF2_ADDR_SIZE.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses despite the 64-bit architecture.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
8418
8419 /* MD interface: Symbol and relocation handling. */
8420
8421 /* Return the address within the segment that a PC-relative fixup is
8422 relative to. For AArch64 PC-relative fixups applied to instructions
8423 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8424
8425 long
8426 md_pcrel_from_section (fixS * fixP, segT seg)
8427 {
8428 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8429
8430 /* If this is pc-relative and we are going to emit a relocation
8431 then we just want to put out any pipeline compensation that the linker
8432 will need. Otherwise we want to use the calculated base. */
8433 if (fixP->fx_pcrel
8434 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8435 || aarch64_force_relocation (fixP)))
8436 base = 0;
8437
8438 /* AArch64 should be consistent for all pc-relative relocations. */
8439 return base + AARCH64_PCREL_OFFSET;
8440 }
8441
8442 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
8443 Otherwise we have no need to default values of symbols. */
8444
8445 symbolS *
8446 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
8447 {
8448 #ifdef OBJ_ELF
8449 if (name[0] == '_' && name[1] == 'G'
8450 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
8451 {
8452 if (!GOT_symbol)
8453 {
8454 if (symbol_find (name))
8455 as_bad (_("GOT already in the symbol table"));
8456
8457 GOT_symbol = symbol_new (name, undefined_section,
8458 &zero_address_frag, 0);
8459 }
8460
8461 return GOT_symbol;
8462 }
8463 #endif
8464
8465 return 0;
8466 }
8467
8468 /* Return non-zero if the indicated VALUE has overflowed the maximum
8469 range expressible by a unsigned number with the indicated number of
8470 BITS. */
8471
8472 static bool
8473 unsigned_overflow (valueT value, unsigned bits)
8474 {
8475 valueT lim;
8476 if (bits >= sizeof (valueT) * 8)
8477 return false;
8478 lim = (valueT) 1 << bits;
8479 return (value >= lim);
8480 }
8481
8482
8483 /* Return non-zero if the indicated VALUE has overflowed the maximum
8484 range expressible by an signed number with the indicated number of
8485 BITS. */
8486
8487 static bool
8488 signed_overflow (offsetT value, unsigned bits)
8489 {
8490 offsetT lim;
8491 if (bits >= sizeof (offsetT) * 8)
8492 return false;
8493 lim = (offsetT) 1 << (bits - 1);
8494 return (value < -lim || value >= lim);
8495 }
8496
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; anything
     without a counterpart maps to OP_NIL.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure means the offset does
     not fit the unscaled form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8559
8560 /* Called by fix_insn to fix a MOV immediate alias instruction.
8561
8562 Operand for a generic move immediate instruction, which is an alias
8563 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
8564 a 32-bit/64-bit immediate value into general register. An assembler error
8565 shall result if the immediate cannot be created by a single one of these
8566 instructions. If there is a choice, then to ensure reversability an
8567 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
8568
8569 static void
8570 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
8571 {
8572 const aarch64_opcode *opcode;
8573
8574 /* Need to check if the destination is SP/ZR. The check has to be done
8575 before any aarch64_replace_opcode. */
8576 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
8577 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
8578
8579 instr->operands[1].imm.value = value;
8580 instr->operands[1].skip = 0;
8581
8582 if (try_mov_wide_p)
8583 {
8584 /* Try the MOVZ alias. */
8585 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
8586 aarch64_replace_opcode (instr, opcode);
8587 if (aarch64_opcode_encode (instr->opcode, instr,
8588 &instr->value, NULL, NULL, insn_sequence))
8589 {
8590 put_aarch64_insn (buf, instr->value);
8591 return;
8592 }
8593 /* Try the MOVK alias. */
8594 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
8595 aarch64_replace_opcode (instr, opcode);
8596 if (aarch64_opcode_encode (instr->opcode, instr,
8597 &instr->value, NULL, NULL, insn_sequence))
8598 {
8599 put_aarch64_insn (buf, instr->value);
8600 return;
8601 }
8602 }
8603
8604 if (try_mov_bitmask_p)
8605 {
8606 /* Try the ORR alias. */
8607 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
8608 aarch64_replace_opcode (instr, opcode);
8609 if (aarch64_opcode_encode (instr->opcode, instr,
8610 &instr->value, NULL, NULL, insn_sequence))
8611 {
8612 put_aarch64_insn (buf, instr->value);
8613 return;
8614 }
8615 }
8616
8617 as_bad_where (fixP->fx_file, fixP->fx_line,
8618 _("immediate cannot be moved by a single instruction"));
8619 }
8620
8621 /* An instruction operand which is immediate related may have symbol used
8622 in the assembly, e.g.
8623
8624 mov w0, u32
8625 .set u32, 0x00ffff00
8626
8627 At the time when the assembly instruction is parsed, a referenced symbol,
8628 like 'u32' in the above example may not have been seen; a fixS is created
8629 in such a case and is handled here after symbols have been resolved.
8630 Instruction is fixed up with VALUE using the information in *FIXP plus
8631 extra information in FLAGS.
8632
8633 This function is called by md_apply_fix to fix up instructions that need
8634 a fix-up described above but does not involve any linker-time relocation. */
8635
8636 static void
8637 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
8638 {
8639 int idx;
8640 uint32_t insn;
8641 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
8642 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
8643 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
8644
8645 if (new_inst)
8646 {
8647 /* Now the instruction is about to be fixed-up, so the operand that
8648 was previously marked as 'ignored' needs to be unmarked in order
8649 to get the encoding done properly. */
8650 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
8651 new_inst->operands[idx].skip = 0;
8652 }
8653
8654 gas_assert (opnd != AARCH64_OPND_NIL);
8655
8656 switch (opnd)
8657 {
8658 case AARCH64_OPND_EXCEPTION:
8659 case AARCH64_OPND_UNDEFINED:
8660 if (unsigned_overflow (value, 16))
8661 as_bad_where (fixP->fx_file, fixP->fx_line,
8662 _("immediate out of range"));
8663 insn = get_aarch64_insn (buf);
8664 insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
8665 put_aarch64_insn (buf, insn);
8666 break;
8667
8668 case AARCH64_OPND_AIMM:
8669 /* ADD or SUB with immediate.
8670 NOTE this assumes we come here with a add/sub shifted reg encoding
8671 3 322|2222|2 2 2 21111 111111
8672 1 098|7654|3 2 1 09876 543210 98765 43210
8673 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
8674 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
8675 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
8676 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
8677 ->
8678 3 322|2222|2 2 221111111111
8679 1 098|7654|3 2 109876543210 98765 43210
8680 11000000 sf 001|0001|shift imm12 Rn Rd ADD
8681 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
8682 51000000 sf 101|0001|shift imm12 Rn Rd SUB
8683 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
8684 Fields sf Rn Rd are already set. */
8685 insn = get_aarch64_insn (buf);
8686 if (value < 0)
8687 {
8688 /* Add <-> sub. */
8689 insn = reencode_addsub_switch_add_sub (insn);
8690 value = -value;
8691 }
8692
8693 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
8694 && unsigned_overflow (value, 12))
8695 {
8696 /* Try to shift the value by 12 to make it fit. */
8697 if (((value >> 12) << 12) == value
8698 && ! unsigned_overflow (value, 12 + 12))
8699 {
8700 value >>= 12;
8701 insn |= encode_addsub_imm_shift_amount (1);
8702 }
8703 }
8704
8705 if (unsigned_overflow (value, 12))
8706 as_bad_where (fixP->fx_file, fixP->fx_line,
8707 _("immediate out of range"));
8708
8709 insn |= encode_addsub_imm (value);
8710
8711 put_aarch64_insn (buf, insn);
8712 break;
8713
8714 case AARCH64_OPND_SIMD_IMM:
8715 case AARCH64_OPND_SIMD_IMM_SFT:
8716 case AARCH64_OPND_LIMM:
8717 /* Bit mask immediate. */
8718 gas_assert (new_inst != NULL);
8719 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
8720 new_inst->operands[idx].imm.value = value;
8721 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
8722 &new_inst->value, NULL, NULL, insn_sequence))
8723 put_aarch64_insn (buf, new_inst->value);
8724 else
8725 as_bad_where (fixP->fx_file, fixP->fx_line,
8726 _("invalid immediate"));
8727 break;
8728
8729 case AARCH64_OPND_HALF:
8730 /* 16-bit unsigned immediate. */
8731 if (unsigned_overflow (value, 16))
8732 as_bad_where (fixP->fx_file, fixP->fx_line,
8733 _("immediate out of range"));
8734 insn = get_aarch64_insn (buf);
8735 insn |= encode_movw_imm (value & 0xffff);
8736 put_aarch64_insn (buf, insn);
8737 break;
8738
8739 case AARCH64_OPND_IMM_MOV:
8740 /* Operand for a generic move immediate instruction, which is
8741 an alias instruction that generates a single MOVZ, MOVN or ORR
8742 instruction to loads a 32-bit/64-bit immediate value into general
8743 register. An assembler error shall result if the immediate cannot be
8744 created by a single one of these instructions. If there is a choice,
8745 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
8746 and MOVZ or MOVN to ORR. */
8747 gas_assert (new_inst != NULL);
8748 fix_mov_imm_insn (fixP, buf, new_inst, value);
8749 break;
8750
8751 case AARCH64_OPND_ADDR_SIMM7:
8752 case AARCH64_OPND_ADDR_SIMM9:
8753 case AARCH64_OPND_ADDR_SIMM9_2:
8754 case AARCH64_OPND_ADDR_SIMM10:
8755 case AARCH64_OPND_ADDR_UIMM12:
8756 case AARCH64_OPND_ADDR_SIMM11:
8757 case AARCH64_OPND_ADDR_SIMM13:
8758 /* Immediate offset in an address. */
8759 insn = get_aarch64_insn (buf);
8760
8761 gas_assert (new_inst != NULL && new_inst->value == insn);
8762 gas_assert (new_inst->opcode->operands[1] == opnd
8763 || new_inst->opcode->operands[2] == opnd);
8764
8765 /* Get the index of the address operand. */
8766 if (new_inst->opcode->operands[1] == opnd)
8767 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
8768 idx = 1;
8769 else
8770 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
8771 idx = 2;
8772
8773 /* Update the resolved offset value. */
8774 new_inst->operands[idx].addr.offset.imm = value;
8775
8776 /* Encode/fix-up. */
8777 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
8778 &new_inst->value, NULL, NULL, insn_sequence))
8779 {
8780 put_aarch64_insn (buf, new_inst->value);
8781 break;
8782 }
8783 else if (new_inst->opcode->iclass == ldst_pos
8784 && try_to_encode_as_unscaled_ldst (new_inst))
8785 {
8786 put_aarch64_insn (buf, new_inst->value);
8787 break;
8788 }
8789
8790 as_bad_where (fixP->fx_file, fixP->fx_line,
8791 _("immediate offset out of range"));
8792 break;
8793
8794 default:
8795 gas_assert (0);
8796 as_fatal (_("unhandled operand code %d"), opnd);
8797 }
8798 }
8799
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  SEG is the
   segment containing the fixup; its use_rela_p flag decides whether the
   addend is patched into the section contents (REL) or left for the
   relocation entry (RELA).  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* FIXUP_F_* bits for fix_insn (e.g. FIXUP_F_HAS_EXPLICIT_SHIFT),
     stashed in fx_addnumber when the fixup was created.  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    /* Plain data relocations: patch the bytes in place when the fixup is
       fully resolved, or when the target stores addends in the section
       contents (REL).  */
    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVW-family relocations: SCALE selects which 16-bit slice of the
       value the instruction materializes.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    /* Relocations that are spelled generically in the source but must be
       narrowed to the 32-bit or 64-bit ELF variant depending on the ABI.  */
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* TLS relocations are resolved by the linker; here we only mark the
       symbol thread-local and check that the reloc will be emitted.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* Marker relocations for TLS descriptor relaxation; nothing to patch.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated the struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually use
     this field, so the impact on the performance should be minimal .  */
  free (fixP->tc_fix_data.inst);

  return;
}
9186
9187 /* Translate internal representation of relocation info to BFD target
9188 format. */
9189
9190 arelent *
9191 tc_gen_reloc (asection * section, fixS * fixp)
9192 {
9193 arelent *reloc;
9194 bfd_reloc_code_real_type code;
9195
9196 reloc = XNEW (arelent);
9197
9198 reloc->sym_ptr_ptr = XNEW (asymbol *);
9199 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9200 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9201
9202 if (fixp->fx_pcrel)
9203 {
9204 if (section->use_rela_p)
9205 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9206 else
9207 fixp->fx_offset = reloc->address;
9208 }
9209 reloc->addend = fixp->fx_offset;
9210
9211 code = fixp->fx_r_type;
9212 switch (code)
9213 {
9214 case BFD_RELOC_16:
9215 if (fixp->fx_pcrel)
9216 code = BFD_RELOC_16_PCREL;
9217 break;
9218
9219 case BFD_RELOC_32:
9220 if (fixp->fx_pcrel)
9221 code = BFD_RELOC_32_PCREL;
9222 break;
9223
9224 case BFD_RELOC_64:
9225 if (fixp->fx_pcrel)
9226 code = BFD_RELOC_64_PCREL;
9227 break;
9228
9229 default:
9230 break;
9231 }
9232
9233 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9234 if (reloc->howto == NULL)
9235 {
9236 as_bad_where (fixp->fx_file, fixp->fx_line,
9237 _
9238 ("cannot represent %s relocation in this object file format"),
9239 bfd_get_reloc_code_name (code));
9240 return NULL;
9241 }
9242
9243 return reloc;
9244 }
9245
9246 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9247
9248 void
9249 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9250 {
9251 bfd_reloc_code_real_type type;
9252 int pcrel = 0;
9253
9254 /* Pick a reloc.
9255 FIXME: @@ Should look at CPU word size. */
9256 switch (size)
9257 {
9258 case 1:
9259 type = BFD_RELOC_8;
9260 break;
9261 case 2:
9262 type = BFD_RELOC_16;
9263 break;
9264 case 4:
9265 type = BFD_RELOC_32;
9266 break;
9267 case 8:
9268 type = BFD_RELOC_64;
9269 break;
9270 default:
9271 as_bad (_("cannot do %u-byte relocation"), size);
9272 type = BFD_RELOC_UNUSED;
9273 break;
9274 }
9275
9276 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9277 }
9278
9279 #ifdef OBJ_ELF
9280
9281 /* Implement md_after_parse_args. This is the earliest time we need to decide
9282 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9283
9284 void
9285 aarch64_after_parse_args (void)
9286 {
9287 if (aarch64_abi != AARCH64_ABI_NONE)
9288 return;
9289
9290 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9291 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9292 aarch64_abi = AARCH64_ABI_ILP32;
9293 else
9294 aarch64_abi = AARCH64_ABI_LP64;
9295 }
9296
9297 const char *
9298 elf64_aarch64_target_format (void)
9299 {
9300 #ifdef TE_CLOUDABI
9301 /* FIXME: What to do for ilp32_p ? */
9302 if (target_big_endian)
9303 return "elf64-bigaarch64-cloudabi";
9304 else
9305 return "elf64-littleaarch64-cloudabi";
9306 #else
9307 if (target_big_endian)
9308 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9309 else
9310 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9311 #endif
9312 }
9313
/* Target hook run on each symbol before writing it out; simply defers
   to the generic ELF symbol frobbing (PUNTP tells the caller to skip
   the symbol).  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9319 #endif
9320
9321 /* MD interface: Finalization. */
9322
9323 /* A good place to do this, although this was probably not intended
9324 for this kind of use. We need to dump the literal pool before
9325 references are made to a null symbol pointer. */
9326
9327 void
9328 aarch64_cleanup (void)
9329 {
9330 literal_pool *pool;
9331
9332 for (pool = list_of_pools; pool; pool = pool->next)
9333 {
9334 /* Put it at the end of the relevant section. */
9335 subseg_set (pool->section, pool->sub_section);
9336 s_ltorg (0);
9337 }
9338 }
9339
9340 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections from
   aarch64_adjust_symtab; ABFD and DUMMY are unused.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly at the start of NEXT; scan forward over empty
	 frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9404 #endif
9405
/* Adjust the symbol table.  Target hook run just before the symbol
   table is written out.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9418
/* Insert KEY -> VALUE into TABLE.  The final 0 argument asks
   str_hash_insert not to replace an existing entry for KEY.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9424
/* Like checked_hash_insert, but for system-register names: assert the
   name fits within the fixed parsing buffer first.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9431
9432 static void
9433 fill_instruction_hash_table (void)
9434 {
9435 const aarch64_opcode *opcode = aarch64_opcode_table;
9436
9437 while (opcode->name != NULL)
9438 {
9439 templates *templ, *new_templ;
9440 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9441
9442 new_templ = XNEW (templates);
9443 new_templ->opcode = opcode;
9444 new_templ->next = NULL;
9445
9446 if (!templ)
9447 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9448 else
9449 {
9450 new_templ->next = templ->next;
9451 templ->next = new_templ;
9452 }
9453 ++opcode;
9454 }
9455 }
9456
9457 static inline void
9458 convert_to_upper (char *dst, const char *src, size_t num)
9459 {
9460 unsigned int i;
9461 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9462 *dst = TOUPPER (*src);
9463 *dst = '\0';
9464 }
9465
9466 /* Assume STR point to a lower-case string, allocate, convert and return
9467 the corresponding upper-case string. */
9468 static inline const char*
9469 get_upper_str (const char *str)
9470 {
9471 char *ret;
9472 size_t len = strlen (str);
9473 ret = XNEWVEC (char, len + 1);
9474 convert_to_upper (ret, str, len);
9475 return ret;
9476 }
9477
/* MD interface: Initialization.  */

/* Create and populate every lookup table the parser uses (opcodes,
   condition codes, shift modifiers, system registers, barrier/prefetch
   options, hints), then resolve the CPU feature set from the command
   line and record the machine type with BFD.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and pstate fields: NULL-name terminated arrays.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend modifiers are accepted in lower and upper case.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.
     -mcpu takes precedence over -march; fall back to the build-time
     default when neither was given.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
9638
/* Command line processing.  */

/* Single-character options; -m<...> carries the sub-option string.  */
const char *md_shortopts = "m:";

/* -EB/-EL are only offered for the endiannesses this target supports.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);

/* A simple -m<option> that just sets an integer variable.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};

/* One entry per -mcpu= value: the feature set it enables.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
9697
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Terminated by a NULL-name sentinel; the first
   ("all") entry is skipped by the .cpu directive parser.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
		  "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
		   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
		  "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
		  "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
		  "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
			    "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				"Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
	     "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
				   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
				   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
		  "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
	      "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
	      "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
	       "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
	     "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
		"Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
		"Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9857
/* Maps a -march= / .arch architecture name onto its feature set.  */
struct aarch64_arch_option_table
{
  const char *name;			/* Architecture name, e.g. "armv8.2-a".  */
  const aarch64_feature_set value;	/* Features implied by that architecture.  */
};
9863
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Terminated by a NULL-name sentinel; the first
   ("all") entry is skipped by the .arch directive parser.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
9884
/* ISA extensions.  Each entry names a "+ext" modifier, the feature bits
   it controls, and the features it depends on (used when computing the
   transitive enable/disable closures below).  */
struct aarch64_option_cpu_value_table
{
  const char *name;			/* Extension name, e.g. "sve2".  */
  const aarch64_feature_set value;	/* Feature bits this extension adds.  */
  const aarch64_feature_set require;	/* Feature dependencies.  */
};
9892
/* Table of all "+ext"/"+noext" architectural-extension modifiers.
   Terminated by a NULL-name sentinel.  Entries with AARCH64_ARCH_NONE
   in the REQUIRE column have no feature dependencies.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME
					 | AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME
					 | AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_ARCH_NONE},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_ARCH_NONE},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {"mops",		AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
			AARCH64_ARCH_NONE},
  {"hbc",		AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
9994
/* Long options of the form "-mxxx=value", dispatched to a sub-option
   parser function.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
10002
10003 /* Transitive closure of features depending on set. */
10004 static aarch64_feature_set
10005 aarch64_feature_disable_set (aarch64_feature_set set)
10006 {
10007 const struct aarch64_option_cpu_value_table *opt;
10008 aarch64_feature_set prev = 0;
10009
10010 while (prev != set) {
10011 prev = set;
10012 for (opt = aarch64_features; opt->name != NULL; opt++)
10013 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10014 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10015 }
10016 return set;
10017 }
10018
10019 /* Transitive closure of dependencies of set. */
10020 static aarch64_feature_set
10021 aarch64_feature_enable_set (aarch64_feature_set set)
10022 {
10023 const struct aarch64_option_cpu_value_table *opt;
10024 aarch64_feature_set prev = 0;
10025
10026 while (prev != set) {
10027 prev = set;
10028 for (opt = aarch64_features; opt->name != NULL; opt++)
10029 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10030 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10031 }
10032 return set;
10033 }
10034
10035 static int
10036 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10037 bool ext_only)
10038 {
10039 /* We insist on extensions being added before being removed. We achieve
10040 this by using the ADDING_VALUE variable to indicate whether we are
10041 adding an extension (1) or removing it (0) and only allowing it to
10042 change in the order -1 -> 1 -> 0. */
10043 int adding_value = -1;
10044 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10045
10046 /* Copy the feature set, so that we can modify it. */
10047 *ext_set = **opt_p;
10048 *opt_p = ext_set;
10049
10050 while (str != NULL && *str != 0)
10051 {
10052 const struct aarch64_option_cpu_value_table *opt;
10053 const char *ext = NULL;
10054 int optlen;
10055
10056 if (!ext_only)
10057 {
10058 if (*str != '+')
10059 {
10060 as_bad (_("invalid architectural extension"));
10061 return 0;
10062 }
10063
10064 ext = strchr (++str, '+');
10065 }
10066
10067 if (ext != NULL)
10068 optlen = ext - str;
10069 else
10070 optlen = strlen (str);
10071
10072 if (optlen >= 2 && startswith (str, "no"))
10073 {
10074 if (adding_value != 0)
10075 adding_value = 0;
10076 optlen -= 2;
10077 str += 2;
10078 }
10079 else if (optlen > 0)
10080 {
10081 if (adding_value == -1)
10082 adding_value = 1;
10083 else if (adding_value != 1)
10084 {
10085 as_bad (_("must specify extensions to add before specifying "
10086 "those to remove"));
10087 return false;
10088 }
10089 }
10090
10091 if (optlen == 0)
10092 {
10093 as_bad (_("missing architectural extension"));
10094 return 0;
10095 }
10096
10097 gas_assert (adding_value != -1);
10098
10099 for (opt = aarch64_features; opt->name != NULL; opt++)
10100 if (strncmp (opt->name, str, optlen) == 0)
10101 {
10102 aarch64_feature_set set;
10103
10104 /* Add or remove the extension. */
10105 if (adding_value)
10106 {
10107 set = aarch64_feature_enable_set (opt->value);
10108 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10109 }
10110 else
10111 {
10112 set = aarch64_feature_disable_set (opt->value);
10113 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10114 }
10115 break;
10116 }
10117
10118 if (opt->name == NULL)
10119 {
10120 as_bad (_("unknown architectural extension `%s'"), str);
10121 return 0;
10122 }
10123
10124 str = ext;
10125 };
10126
10127 return 1;
10128 }
10129
10130 static int
10131 aarch64_parse_cpu (const char *str)
10132 {
10133 const struct aarch64_cpu_option_table *opt;
10134 const char *ext = strchr (str, '+');
10135 size_t optlen;
10136
10137 if (ext != NULL)
10138 optlen = ext - str;
10139 else
10140 optlen = strlen (str);
10141
10142 if (optlen == 0)
10143 {
10144 as_bad (_("missing cpu name `%s'"), str);
10145 return 0;
10146 }
10147
10148 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10149 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10150 {
10151 mcpu_cpu_opt = &opt->value;
10152 if (ext != NULL)
10153 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10154
10155 return 1;
10156 }
10157
10158 as_bad (_("unknown cpu `%s'"), str);
10159 return 0;
10160 }
10161
10162 static int
10163 aarch64_parse_arch (const char *str)
10164 {
10165 const struct aarch64_arch_option_table *opt;
10166 const char *ext = strchr (str, '+');
10167 size_t optlen;
10168
10169 if (ext != NULL)
10170 optlen = ext - str;
10171 else
10172 optlen = strlen (str);
10173
10174 if (optlen == 0)
10175 {
10176 as_bad (_("missing architecture name `%s'"), str);
10177 return 0;
10178 }
10179
10180 for (opt = aarch64_archs; opt->name != NULL; opt++)
10181 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10182 {
10183 march_cpu_opt = &opt->value;
10184 if (ext != NULL)
10185 return aarch64_parse_features (ext, &march_cpu_opt, false);
10186
10187 return 1;
10188 }
10189
10190 as_bad (_("unknown architecture `%s'\n"), str);
10191 return 0;
10192 }
10193
/* ABIs.  Maps a -mabi= name onto the corresponding ABI enumerator.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name as given on the command line.  */
  enum aarch64_abi_type value;	/* Corresponding internal ABI value.  */
};
10200
/* Recognized -mabi= values.  Unlike the other tables this one has no
   sentinel; it is walked with ARRAY_SIZE.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
10205
10206 static int
10207 aarch64_parse_abi (const char *str)
10208 {
10209 unsigned int i;
10210
10211 if (str[0] == '\0')
10212 {
10213 as_bad (_("missing abi name `%s'"), str);
10214 return 0;
10215 }
10216
10217 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10218 if (strcmp (str, aarch64_abis[i].name) == 0)
10219 {
10220 aarch64_abi = aarch64_abis[i].value;
10221 return 1;
10222 }
10223
10224 as_bad (_("unknown abi `%s'\n"), str);
10225 return 0;
10226 }
10227
/* "-mxxx=value" options, dispatched by prefix match in md_parse_option
   to the listed sub-option parser.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10239
/* gas target hook: handle command-line option C with argument ARG.
   Checks the fixed cases first, then the aarch64_opts table of simple
   flags, then the aarch64_long_opts table of "-mxxx=value" options.
   Returns 1 if the option was consumed, 0 if it was not recognized.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  /* Match the single-char option code plus the rest of the
	     option name supplied in ARG.  */
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text after "=".  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10306
10307 void
10308 md_show_usage (FILE * fp)
10309 {
10310 struct aarch64_option_table *opt;
10311 struct aarch64_long_option_table *lopt;
10312
10313 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10314
10315 for (opt = aarch64_opts; opt->option != NULL; opt++)
10316 if (opt->help != NULL)
10317 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10318
10319 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10320 if (lopt->help != NULL)
10321 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10322
10323 #ifdef OPTION_EB
10324 fprintf (fp, _("\
10325 -EB assemble code for a big-endian cpu\n"));
10326 #endif
10327
10328 #ifdef OPTION_EL
10329 fprintf (fp, _("\
10330 -EL assemble code for a little-endian cpu\n"));
10331 #endif
10332 }
10333
/* Parse a .cpu directive: ".cpu <name>[+ext...]".  On success the
   global feature set (mcpu_cpu_opt / cpu_variant) is switched to the
   named CPU; on an unknown name an error is emitted and the rest of
   the line is skipped.  */

static void
s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_cpu_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  /* Temporarily NUL-terminate the operand at the first whitespace so it
     can be compared as a string; SAVED_CHAR restores the buffer later.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* '+' introduces an optional architectural-extension list.  */
  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry.  */
  for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
	&& strncmp (name, opt->name, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	if (ext != NULL)
	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
	    /* NOTE(review): this error path returns without restoring
	       *input_line_pointer from SAVED_CHAR — confirm intended.  */
	    return;

	cpu_variant = *mcpu_cpu_opt;

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
10378
10379
/* Parse a .arch directive: ".arch <name>[+ext...]".  On success the
   global feature set (mcpu_cpu_opt / cpu_variant) is switched to the
   named architecture; on an unknown name an error is emitted and the
   rest of the line is skipped.  */

static void
s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_arch_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  /* Temporarily NUL-terminate the operand at the first whitespace so it
     can be compared as a string; SAVED_CHAR restores the buffer later.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* '+' introduces an optional architectural-extension list.  */
  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry.  */
  for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
	&& strncmp (name, opt->name, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	if (ext != NULL)
	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
	    /* NOTE(review): this error path returns without restoring
	       *input_line_pointer from SAVED_CHAR — confirm intended.  */
	    return;

	cpu_variant = *mcpu_cpu_opt;

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
10425
10426 /* Parse a .arch_extension directive. */
10427
10428 static void
10429 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10430 {
10431 char saved_char;
10432 char *ext = input_line_pointer;;
10433
10434 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10435 input_line_pointer++;
10436 saved_char = *input_line_pointer;
10437 *input_line_pointer = 0;
10438
10439 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10440 return;
10441
10442 cpu_variant = *mcpu_cpu_opt;
10443
10444 *input_line_pointer = saved_char;
10445 demand_empty_rest_of_line ();
10446 }
10447
/* Copy symbol information: propagate the AArch64-specific symbol flag
   word from SRC to DEST (used when one symbol is aliased to another).  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10455
#ifdef OBJ_ELF
/* Same as elf_copy_symbol_attributes, but without copying st_other.
   This is needed so AArch64 specific st_other values can be independently
   specified for an IFUNC resolver (that is called by the dynamic linker)
   and the symbol it resolves (aliased to the resolver).  In particular,
   if a function symbol has special st_other value set via directives,
   then attaching an IFUNC resolver to that symbol should not override
   the st_other setting.  Requiring the directive on the IFUNC resolver
   symbol would be unexpected and problematic in C code, where the two
   symbols appear as two independent function declarations.  */

void
aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  struct elf_obj_sy *srcelf = symbol_get_obj (src);
  struct elf_obj_sy *destelf = symbol_get_obj (dest);
  /* If size is unset, copy size from src.  Because we don't track whether
     .size has been used, we can't differentiate .size dest, 0 from the case
     where dest's size is unset.  */
  if (!destelf->size && S_GET_SIZE (dest) == 0)
    {
      /* Copy the size expression (deep copy, since SRC owns its own)
	 and the resolved numeric size.  */
      if (srcelf->size)
	{
	  destelf->size = XNEW (expressionS);
	  *destelf->size = *srcelf->size;
	}
      S_SET_SIZE (dest, S_GET_SIZE (src));
    }
}
#endif