]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
aarch64: Consolidate ZA tile range checks
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 /* Types of processor to assemble for. */
42 #ifndef CPU_DEFAULT
43 #define CPU_DEFAULT AARCH64_ARCH_V8
44 #endif
45
46 #define streq(a, b) (strcmp (a, b) == 0)
47
48 #define END_OF_INSN '\0'
49
50 static aarch64_feature_set cpu_variant;
51
52 /* Variables that we set while parsing command-line options. Once all
53 options have been read we re-process these values to set the real
54 assembly flags. */
55 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
56 static const aarch64_feature_set *march_cpu_opt = NULL;
57
58 /* Constants for known architecture features. */
59 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
60
61 /* Currently active instruction sequence. */
62 static aarch64_instr_sequence *insn_sequence = NULL;
63
64 #ifdef OBJ_ELF
65 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
66 static symbolS *GOT_symbol;
67 #endif
68
69 /* Which ABI to use. */
70 enum aarch64_abi_type
71 {
72 AARCH64_ABI_NONE = 0,
73 AARCH64_ABI_LP64 = 1,
74 AARCH64_ABI_ILP32 = 2,
75 AARCH64_ABI_LLP64 = 3
76 };
77
78 unsigned int aarch64_sframe_cfa_sp_reg;
79 /* The other CFA base register for SFrame stack trace info. */
80 unsigned int aarch64_sframe_cfa_fp_reg;
81 unsigned int aarch64_sframe_cfa_ra_reg;
82
83 #ifndef DEFAULT_ARCH
84 #define DEFAULT_ARCH "aarch64"
85 #endif
86
87 #ifdef OBJ_ELF
88 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
89 static const char *default_arch = DEFAULT_ARCH;
90 #endif
91
92 /* AArch64 ABI for the output file. */
93 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
94
95 /* When non-zero, program to a 32-bit model, in which the C data types
96 int, long and all pointer types are 32-bit objects (ILP32); or to a
97 64-bit model, in which the C int type is 32-bits but the C long type
98 and all pointer types are 64-bit objects (LP64). */
99 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
100
101 /* When non zero, C types int and long are 32 bit,
102 pointers, however are 64 bit */
103 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
104
105 enum vector_el_type
106 {
107 NT_invtype = -1,
108 NT_b,
109 NT_h,
110 NT_s,
111 NT_d,
112 NT_q,
113 NT_zero,
114 NT_merge
115 };
116
/* SME horizontal or vertical slice indicator, encoded in "V".
   Values:
     0 - Horizontal
     1 - Vertical.  */
122 enum sme_hv_slice
123 {
124 HV_horizontal = 0,
125 HV_vertical = 1
126 };
127
128 /* Bits for DEFINED field in vector_type_el. */
129 #define NTA_HASTYPE 1
130 #define NTA_HASINDEX 2
131 #define NTA_HASVARWIDTH 4
132
/* Describes the shape suffix and optional element index parsed after a
   register name, e.g. the ".4s" and "[1]" in "v0.4s[1]".  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type (NT_*).  */
  unsigned char defined;	/* Mask of valid NTA_HAS* bits.  */
  unsigned element_size;	/* Element size in bits.  */
  unsigned width;		/* Number of elements; 0 if unspecified.  */
  int64_t index;		/* Element index, if NTA_HASINDEX is set.  */
};
141
142 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
143
144 struct reloc
145 {
146 bfd_reloc_code_real_type type;
147 expressionS exp;
148 int pc_rel;
149 enum aarch64_opnd opnd;
150 uint32_t flags;
151 unsigned need_libopcodes_p : 1;
152 };
153
154 struct aarch64_instruction
155 {
156 /* libopcodes structure for instruction intermediate representation. */
157 aarch64_inst base;
158 /* Record assembly errors found during the parsing. */
159 aarch64_operand_error parsing_error;
160 /* The condition that appears in the assembly line. */
161 int cond;
162 /* Relocation information (including the GAS internal fixup). */
163 struct reloc reloc;
164 /* Need to generate an immediate in the literal pool. */
165 unsigned gen_lit_pool : 1;
166 };
167
168 typedef struct aarch64_instruction aarch64_instruction;
169
170 static aarch64_instruction inst;
171
172 static bool parse_operands (char *, const aarch64_opcode *);
173 static bool programmer_friendly_fixup (aarch64_instruction *);
174
175 /* Diagnostics inline function utilities.
176
177 These are lightweight utilities which should only be called by parse_operands
178 and other parsers. GAS processes each assembly line by parsing it against
179 instruction template(s), in the case of multiple templates (for the same
180 mnemonic name), those templates are tried one by one until one succeeds or
181 all fail. An assembly line may fail a few templates before being
182 successfully parsed; an error saved here in most cases is not a user error
183 but an error indicating the current template is not the right template.
184 Therefore it is very important that errors can be saved at a low cost during
185 the parsing; we don't want to slow down the whole parsing by recording
186 non-user errors in detail.
187
188 Remember that the objective is to help GAS pick up the most appropriate
189 error message in the case of multiple templates, e.g. FMOV which has 8
190 templates. */
191
static inline void
clear_error (void)
{
  /* Wipe any error recorded for the current instruction and mark the
     parsing state as "no error pending".  */
  memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
  inst.parsing_error.kind = AARCH64_OPDE_NIL;
}
198
/* Return TRUE if an error has been recorded for the instruction
   currently being parsed.  */
static inline bool
error_p (void)
{
  return inst.parsing_error.kind != AARCH64_OPDE_NIL;
}
204
/* Record an error of kind KIND with message ERROR against the current
   instruction.  Any previously recorded error is discarded and the
   operand index is reset to -1, i.e. "not specific to one operand".  */
static inline void
set_error (enum aarch64_operand_error_kind kind, const char *error)
{
  memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
  inst.parsing_error.index = -1;
  inst.parsing_error.kind = kind;
  inst.parsing_error.error = error;
}
213
/* Record ERROR as a recoverable error; matching may continue against
   other instruction templates.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
219
220 /* Use the DESC field of the corresponding aarch64_operand entry to compose
221 the error message. */
static inline void
set_default_error (void)
{
  /* A null message tells the reporting code to compose the text from
     the operand's DESC field.  */
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
227
/* Record ERROR as a syntax error, replacing any pending error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
233
/* Record ERROR as a syntax error, but only if no earlier error is
   pending; the earliest error is usually the most meaningful.  */
static inline void
set_first_syntax_error (const char *error)
{
  if (! error_p ())
    set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
240
/* Record ERROR as a fatal syntax error, which stops the line from
   being matched against any further templates.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
246 \f
247 /* Return value for certain parsers when the parsing fails; those parsers
248 return the information of the parsed result, e.g. register number, on
249 success. */
250 #define PARSE_FAIL -1
251
252 /* This is an invalid condition code that means no conditional field is
253 present. */
254 #define COND_ALWAYS 0x10
255
256 typedef struct
257 {
258 const char *template;
259 uint32_t value;
260 } asm_nzcv;
261
262 struct reloc_entry
263 {
264 char *name;
265 bfd_reloc_code_real_type reloc;
266 };
267
268 /* Macros to define the register types and masks for the purpose
269 of parsing. */
270
271 #undef AARCH64_REG_TYPES
272 #define AARCH64_REG_TYPES \
273 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
274 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
275 BASIC_REG_TYPE(SP_32) /* wsp */ \
276 BASIC_REG_TYPE(SP_64) /* sp */ \
277 BASIC_REG_TYPE(Z_32) /* wzr */ \
278 BASIC_REG_TYPE(Z_64) /* xzr */ \
279 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
280 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
281 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
282 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
283 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
284 BASIC_REG_TYPE(VN) /* v[0-31] */ \
285 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
286 BASIC_REG_TYPE(PN) /* p[0-15] */ \
287 BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
288 BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
289 BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
290 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
291 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
292 /* Typecheck: same, plus SVE registers. */ \
293 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
294 | REG_TYPE(ZN)) \
295 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
296 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
298 /* Typecheck: same, plus SVE registers. */ \
299 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
300 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
301 | REG_TYPE(ZN)) \
302 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
303 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
305 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
306 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
307 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
308 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
309 /* Typecheck: any [BHSDQ]P FP. */ \
310 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
311 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
312 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
313 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
314 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
315 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
316 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
317 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
318 be used for SVE instructions, since Zn and Pn are valid symbols \
319 in other contexts. */ \
320 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
321 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
322 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
323 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
324 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
325 | REG_TYPE(ZN) | REG_TYPE(PN)) \
326 /* Any integer register; used for error messages only. */ \
327 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
328 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
329 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
330 /* A horizontal or vertical slice of a ZA tile. */ \
331 MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV)) \
332 /* Pseudo type to mark the end of the enumerator sequence. */ \
333 BASIC_REG_TYPE(MAX)
334
335 #undef BASIC_REG_TYPE
336 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
337 #undef MULTI_REG_TYPE
338 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
339
340 /* Register type enumerators. */
341 typedef enum aarch64_reg_type_
342 {
343 /* A list of REG_TYPE_*. */
344 AARCH64_REG_TYPES
345 } aarch64_reg_type;
346
347 #undef BASIC_REG_TYPE
348 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
349 #undef REG_TYPE
350 #define REG_TYPE(T) (1 << REG_TYPE_##T)
351 #undef MULTI_REG_TYPE
352 #define MULTI_REG_TYPE(T,V) V,
353
354 /* Structure for a hash table entry for a register. */
355 typedef struct
356 {
357 const char *name;
358 unsigned char number;
359 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
360 unsigned char builtin;
361 } reg_entry;
362
363 /* Values indexed by aarch64_reg_type to assist the type checking. */
364 static const unsigned reg_type_masks[] =
365 {
366 AARCH64_REG_TYPES
367 };
368
369 #undef BASIC_REG_TYPE
370 #undef REG_TYPE
371 #undef MULTI_REG_TYPE
372 #undef AARCH64_REG_TYPES
373
/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be synchronized with the aarch64_reg_type
   definitions above.  */
377 static const char *
378 get_reg_expected_msg (aarch64_reg_type reg_type)
379 {
380 const char *msg;
381
382 switch (reg_type)
383 {
384 case REG_TYPE_R_32:
385 msg = N_("integer 32-bit register expected");
386 break;
387 case REG_TYPE_R_64:
388 msg = N_("integer 64-bit register expected");
389 break;
390 case REG_TYPE_R_N:
391 msg = N_("integer register expected");
392 break;
393 case REG_TYPE_R64_SP:
394 msg = N_("64-bit integer or SP register expected");
395 break;
396 case REG_TYPE_SVE_BASE:
397 msg = N_("base register expected");
398 break;
399 case REG_TYPE_R_Z:
400 msg = N_("integer or zero register expected");
401 break;
402 case REG_TYPE_SVE_OFFSET:
403 msg = N_("offset register expected");
404 break;
405 case REG_TYPE_R_SP:
406 msg = N_("integer or SP register expected");
407 break;
408 case REG_TYPE_R_Z_SP:
409 msg = N_("integer, zero or SP register expected");
410 break;
411 case REG_TYPE_FP_B:
412 msg = N_("8-bit SIMD scalar register expected");
413 break;
414 case REG_TYPE_FP_H:
415 msg = N_("16-bit SIMD scalar or floating-point half precision "
416 "register expected");
417 break;
418 case REG_TYPE_FP_S:
419 msg = N_("32-bit SIMD scalar or floating-point single precision "
420 "register expected");
421 break;
422 case REG_TYPE_FP_D:
423 msg = N_("64-bit SIMD scalar or floating-point double precision "
424 "register expected");
425 break;
426 case REG_TYPE_FP_Q:
427 msg = N_("128-bit SIMD scalar or floating-point quad precision "
428 "register expected");
429 break;
430 case REG_TYPE_R_Z_BHSDQ_V:
431 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
432 msg = N_("register expected");
433 break;
434 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
435 msg = N_("SIMD scalar or floating-point register expected");
436 break;
437 case REG_TYPE_VN: /* any V reg */
438 msg = N_("vector register expected");
439 break;
440 case REG_TYPE_ZN:
441 msg = N_("SVE vector register expected");
442 break;
443 case REG_TYPE_PN:
444 msg = N_("SVE predicate register expected");
445 break;
446 default:
447 as_fatal (_("invalid register type %d"), reg_type);
448 }
449 return msg;
450 }
451
452 /* Some well known registers that we refer to directly elsewhere. */
453 #define REG_SP 31
454 #define REG_ZR 31
455
456 /* Instructions take 4 bytes in the object file. */
457 #define INSN_SIZE 4
458
459 static htab_t aarch64_ops_hsh;
460 static htab_t aarch64_cond_hsh;
461 static htab_t aarch64_shift_hsh;
462 static htab_t aarch64_sys_regs_hsh;
463 static htab_t aarch64_pstatefield_hsh;
464 static htab_t aarch64_sys_regs_ic_hsh;
465 static htab_t aarch64_sys_regs_dc_hsh;
466 static htab_t aarch64_sys_regs_at_hsh;
467 static htab_t aarch64_sys_regs_tlbi_hsh;
468 static htab_t aarch64_sys_regs_sr_hsh;
469 static htab_t aarch64_reg_hsh;
470 static htab_t aarch64_barrier_opt_hsh;
471 static htab_t aarch64_nzcv_hsh;
472 static htab_t aarch64_pldop_hsh;
473 static htab_t aarch64_hint_opt_hsh;
474
475 /* Stuff needed to resolve the label ambiguity
476 As:
477 ...
478 label: <insn>
479 may differ from:
480 ...
481 label:
482 <insn> */
483
484 static symbolS *last_label_seen;
485
486 /* Literal pool structure. Held on a per-section
487 and per-sub-section basis. */
488
489 #define MAX_LITERAL_POOL_SIZE 1024
490 typedef struct literal_expression
491 {
492 expressionS exp;
493 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
494 LITTLENUM_TYPE * bignum;
495 } literal_expression;
496
497 typedef struct literal_pool
498 {
499 literal_expression literals[MAX_LITERAL_POOL_SIZE];
500 unsigned int next_free_entry;
501 unsigned int id;
502 symbolS *symbol;
503 segT section;
504 subsegT sub_section;
505 int size;
506 struct literal_pool *next;
507 } literal_pool;
508
509 /* Pointer to a linked list of literal pools. */
510 static literal_pool *list_of_pools = NULL;
511 \f
512 /* Pure syntax. */
513
514 /* This array holds the chars that always start a comment. If the
515 pre-processor is disabled, these aren't very useful. */
516 const char comment_chars[] = "";
517
518 /* This array holds the chars that only start a comment at the beginning of
519 a line. If the line seems to have the form '# 123 filename'
520 .line and .file directives will appear in the pre-processed output. */
521 /* Note that input_file.c hand checks for '#' at the beginning of the
522 first line of the input file. This is because the compiler outputs
523 #NO_APP at the beginning of its output. */
524 /* Also note that comments like this one will always work. */
525 const char line_comment_chars[] = "#";
526
527 const char line_separator_chars[] = ";";
528
529 /* Chars that can be used to separate mant
530 from exp in floating point numbers. */
531 const char EXP_CHARS[] = "eE";
532
533 /* Chars that mean this number is a floating point constant. */
534 /* As in 0f12.456 */
535 /* or 0d1.2345e12 */
536
537 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
538
539 /* Prefix character that indicates the start of an immediate value. */
540 #define is_immediate_prefix(C) ((C) == '#')
541
542 /* Separator character handling. */
543
544 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
545
/* If the next character of *STR is C, consume it and return TRUE;
   otherwise leave *STR unchanged and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
557
558 #define skip_past_comma(str) skip_past_char (str, ',')
559
560 /* Arithmetic expressions (possibly involving symbols). */
561
562 static bool in_aarch64_get_expression = false;
563
564 /* Third argument to aarch64_get_expression. */
565 #define GE_NO_PREFIX false
566 #define GE_OPT_PREFIX true
567
568 /* Fourth argument to aarch64_get_expression. */
569 #define ALLOW_ABSENT false
570 #define REJECT_ABSENT true
571
/* Return TRUE if the string pointed to by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.  */
578
static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  /* Consume an optional leading '#' when the caller allows one.  */
  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression parser reads from input_line_pointer, so
     temporarily redirect it to *STR and restore it afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Tell md_operand it is being called from here, so it can mark bad
     expressions as O_illegal (see md_operand below).  */
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* An explicit '#' prefix means this was definitely meant to be an
	 immediate, so report a fatal error rather than "wrong template".  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  /* Success: advance the caller's pointer past the expression.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
638
639 /* Turn a string in input_line_pointer into a floating point constant
640 of type TYPE, and store the appropriate bytes in *LITP. The number
641 of LITTLENUMS emitted is stored in *SIZEP. An error message is
642 returned, or NULL on OK. */
643
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* AArch64 uses IEEE formats throughout; defer to the generic IEEE
     helper, honouring the target endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
649
650 /* We handle all bad expressions here, so that we can report the faulty
651 instruction in the error message. */
void
md_operand (expressionS * exp)
{
  /* Only flag the expression as illegal when called back from
     aarch64_get_expression; other callers handle failures themselves.  */
  if (in_aarch64_get_expression)
    exp->X_op = O_illegal;
}
658
659 /* Immediate values. */
660
661 /* Errors may be set multiple times during parsing or bit encoding
662 (particularly in the Neon bits), but usually the earliest error which is set
663 will be the most meaningful. Avoid overwriting it with later (cascading)
664 errors by calling this function. */
665
/* Record ERROR as a syntax error unless an earlier (and therefore more
   meaningful) error has already been recorded.  */
static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
672
673 /* Similar to first_error, but this function accepts formatted error
674 message. */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  /* Like first_error: only record a message if none is pending.  */
  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message must fit within the static buffer.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
697
698 /* Internal helper routine converting a vector_type_el structure *VECTYPE
699 to a corresponding operand qualifier. */
700
static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Smallest (narrowest-vector) qualifier for each element type; used
     as the base of the offset calculation below.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
  };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predication suffixes map directly to the /z and /m qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register: the scalar qualifiers S_B..S_Q are
	 laid out in the same order as vector_el_type.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
772
773 /* Register parsing. */
774
775 /* Generic register parser which is called by other specialized
776 register parsers.
777 CCP points to what should be the beginning of a register name.
778 If it is indeed a valid register name, advance CCP over it and
779 return the reg_entry structure; otherwise return NULL.
780 It does not issue diagnostics. */
781
782 static reg_entry *
783 parse_reg (char **ccp)
784 {
785 char *start = *ccp;
786 char *p;
787 reg_entry *reg;
788
789 #ifdef REGISTER_PREFIX
790 if (*start != REGISTER_PREFIX)
791 return NULL;
792 start++;
793 #endif
794
795 p = start;
796 if (!ISALPHA (*p) || !is_name_beginner (*p))
797 return NULL;
798
799 do
800 p++;
801 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
802
803 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
804
805 if (!reg)
806 return NULL;
807
808 *ccp = p;
809 return reg;
810 }
811
812 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
813 return FALSE. */
814 static bool
815 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
816 {
817 return (reg_type_masks[type] & (1 << reg->type)) != 0;
818 }
819
820 /* Try to parse a base or offset register. Allow SVE base and offset
821 registers if REG_TYPE includes SVE registers. Return the register
822 entry on success, setting *QUALIFIER to the register qualifier.
823 Return null otherwise.
824
825 Note that this function does not issue any diagnostics. */
826
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit forms: Wn, WSP and WZR.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit forms: Xn, SP and XZR.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* An SVE Zn register is only accepted if REG_TYPE allows it, and
	 it must carry an explicit ".s" or ".d" element-size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Skip the ".s" / ".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  /* Commit the input position only on success.  */
  *ccp = str;

  return reg;
}
877
878 /* Try to parse a base or offset register. Return the register entry
879 on success, setting *QUALIFIER to the register qualifier. Return null
880 otherwise.
881
882 Note that this function does not issue any diagnostics. */
883
static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept any W/X integer register, {W}SP or [WX]ZR.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
889
890 /* Parse the qualifier of a vector register or vector element of type
891 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
892 succeeds; otherwise return FALSE.
893
894 Accept only one occurrence of:
895 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
896 b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* Only plain Neon V registers take a leading element count; for the
     other register types the suffix is just an element size letter.  */
  if (reg_type != REG_TYPE_VN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only valid for Neon as ".1q", or as a bare element size
	 on the other register types.  */
      if (reg_type != REG_TYPE_VN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* Reject totals other than a 64-bit or 128-bit vector, except for
     the short 2h and 4b arrangements.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;
  parsed_type->element_size = element_size;

  *str = ptr;

  return true;
}
976
977 /* *STR contains an SVE zero/merge predication suffix. Parse it into
978 *PARSED_TYPE and point *STR at the end of the suffix. */
979
980 static bool
981 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
982 {
983 char *ptr = *str;
984
985 /* Skip '/'. */
986 gas_assert (*ptr == '/');
987 ptr++;
988 switch (TOLOWER (*ptr))
989 {
990 case 'z':
991 parsed_type->type = NT_zero;
992 break;
993 case 'm':
994 parsed_type->type = NT_merge;
995 break;
996 default:
997 if (*ptr != '\0' && *ptr != ',')
998 first_error_fmt (_("unexpected character `%c' in predication type"),
999 *ptr);
1000 else
1001 first_error (_("missing predication type"));
1002 return false;
1003 }
1004 parsed_type->width = 0;
1005 *str = ptr + 1;
1006 return true;
1007 }
1008
1009 /* Return true if CH is a valid suffix character for registers of
1010 type TYPE. */
1011
1012 static bool
1013 aarch64_valid_suffix_char_p (aarch64_reg_type type, char ch)
1014 {
1015 switch (type)
1016 {
1017 case REG_TYPE_VN:
1018 case REG_TYPE_ZN:
1019 case REG_TYPE_ZAT:
1020 case REG_TYPE_ZATH:
1021 case REG_TYPE_ZATV:
1022 return ch == '.';
1023
1024 case REG_TYPE_PN:
1025 return ch == '.' || ch == '/';
1026
1027 default:
1028 return false;
1029 }
1030 }
1031
1032 /* Parse a register of the type TYPE.
1033
1034 Return null if the string pointed to by *CCP is not a valid register
1035 name or the parsed register is not of TYPE.
1036
1037 Otherwise return the register, and optionally return the register
1038 shape and element index information in *TYPEINFO.
1039
1040 FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list.
1041
1042 FLAGS includes PTR_FULL_REG if the function should ignore any potential
1043 register index. */
1044
1045 #define PTR_IN_REGLIST (1U << 0)
1046 #define PTR_FULL_REG (1U << 1)
1047
static const reg_entry *
parse_typed_reg (char **ccp, aarch64_reg_type type,
		 struct vector_type_el *typeinfo, unsigned int flags)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start from "no shape, no index" and fill in as we parse.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.element_size = 0;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return NULL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return NULL;
    }
  /* Narrow TYPE to the basic type of the register actually parsed.  */
  type = reg->type;

  if (aarch64_valid_suffix_char_p (reg->type, *str))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return NULL;
	  /* A ZA tile number is only valid if it is below the number of
	     tiles implied by the element size, e.g. za0-za3 for ".s"
	     (number * 8 < element_size in bits).  */
	  if ((reg->type == REG_TYPE_ZAT
	       || reg->type == REG_TYPE_ZATH
	       || reg->type == REG_TYPE_ZATV)
	      && reg->number * 8 >= parsetype.element_size)
	    {
	      set_syntax_error (_("ZA tile number out of range"));
	      return NULL;
	    }
	}
      else
	{
	  /* The '/' case: an SVE predication suffix.  */
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return NULL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type != REG_TYPE_VN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return NULL;
	}

      if (flags & PTR_IN_REGLIST)
	{
	  first_error (_("index not allowed inside register list"));
	  return NULL;
	}

      atype.defined |= NTA_HASINDEX;

      /* Parse the constant index between the brackets.  */
      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return NULL;
	}

      if (! skip_past_char (&str, ']'))
	return NULL;

      atype.index = exp.X_add_number;
    }
  else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return NULL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  /* Commit the input position only on success.  */
  *ccp = str;

  return reg;
}
1176
1177 /* Parse register.
1178
1179 Return the register on success; return null otherwise.
1180
1181 If this is a NEON vector register with additional type information, fill
1182 in the struct pointed to by VECTYPE (if non-NULL).
1183
1184 This parser does not handle register lists. */
1185
1186 static const reg_entry *
1187 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1188 struct vector_type_el *vectype)
1189 {
1190 return parse_typed_reg (ccp, type, vectype, 0);
1191 }
1192
1193 static inline bool
1194 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1195 {
1196 return (e1.type == e2.type
1197 && e1.defined == e2.defined
1198 && e1.width == e2.width
1199 && e1.element_size == e2.element_size
1200 && e1.index == e2.index);
1201 }
1202
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;	/* Current regno and start of a "-" range.  */
  int in_range;		/* Nonzero while parsing the right side of "-".  */
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.element_size = 0;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
					      PTR_IN_REGLIST);
      if (!reg)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      val = reg->number;
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  /* Skip the range start itself below: it was already encoded
	     when first parsed.  */
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* All list elements must share the first element's shape
	     and index information.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Encode each register (expanding a "-" range) 5 bits at a time.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue after a ',' separator, or enter range mode on '-'.  The
     comma operator sets in_range before testing for '-'; a comma resets
     it to 0 at the end of the loop body above.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* If any element carried an index marker, a shared [index] must
     follow the closing brace.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1365
1366 /* Directives: register aliases. */
1367
1368 static reg_entry *
1369 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1370 {
1371 reg_entry *new;
1372 const char *name;
1373
1374 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1375 {
1376 if (new->builtin)
1377 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1378 str);
1379
1380 /* Only warn about a redefinition if it's not defined as the
1381 same register. */
1382 else if (new->number != number || new->type != type)
1383 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1384
1385 return NULL;
1386 }
1387
1388 name = xstrdup (str);
1389 new = XNEW (reg_entry);
1390
1391 new->name = name;
1392 new->number = number;
1393 new->type = type;
1394 new->builtin = false;
1395
1396 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1397
1398 return new;
1399 }
1400
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Still return true: we recognized the directive, so the caller
	 should not try to process the line further.  */
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the upper-case variant if it differs from the name
	 as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise for the lower-case variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1480
/* Handler for a line that begins with ".req".  Should never be called,
   as .req goes between the alias and the register name, not at the
   beginning of the line; a leading .req is therefore a syntax error.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1488
1489 /* The .unreq directive deletes an alias which was previously defined
1490 by .req. For example:
1491
1492 my_alias .req r11
1493 .unreq my_alias */
1494
1495 static void
1496 s_unreq (int a ATTRIBUTE_UNUSED)
1497 {
1498 char *name;
1499 char saved_char;
1500
1501 name = input_line_pointer;
1502 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1503 saved_char = *input_line_pointer;
1504 *input_line_pointer = 0;
1505
1506 if (!*name)
1507 as_bad (_("invalid syntax for .unreq directive"));
1508 else
1509 {
1510 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1511
1512 if (!reg)
1513 as_bad (_("unknown register alias '%s'"), name);
1514 else if (reg->builtin)
1515 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1516 name);
1517 else
1518 {
1519 char *p;
1520 char *nbuf;
1521
1522 str_hash_delete (aarch64_reg_hsh, name);
1523 free ((char *) reg->name);
1524 free (reg);
1525
1526 /* Also locate the all upper case and all lower case versions.
1527 Do not complain if we cannot find one or the other as it
1528 was probably deleted above. */
1529
1530 nbuf = strdup (name);
1531 for (p = nbuf; *p; p++)
1532 *p = TOUPPER (*p);
1533 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1534 if (reg)
1535 {
1536 str_hash_delete (aarch64_reg_hsh, nbuf);
1537 free ((char *) reg->name);
1538 free (reg);
1539 }
1540
1541 for (p = nbuf; *p; p++)
1542 *p = TOLOWER (*p);
1543 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1544 if (reg)
1545 {
1546 str_hash_delete (aarch64_reg_hsh, nbuf);
1547 free ((char *) reg->name);
1548 free (reg);
1549 }
1550
1551 free (nbuf);
1552 }
1553 }
1554
1555 *input_line_pointer = saved_char;
1556 demand_empty_rest_of_line ();
1557 }
1558
1559 /* Directives: Instruction set selection. */
1560
1561 #if defined OBJ_ELF || defined OBJ_COFF
1562 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1563 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1564 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1565 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1566
/* Create a new mapping symbol ($d for data, $x for code) marking the
   transition to STATE at offset VALUE within FRAG, and record it in
   the frag's first_map/last_map bookkeeping.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Remove the superseded symbol from the global symbol chain.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Two symbols at the same offset: the newer one wins.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1622
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Emit a $d symbol at offset VALUE within FRAG, followed by a STATE
   symbol BYTES further on, replacing any mapping symbol already
   recorded at VALUE.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The symbol being removed is also the frag's first map.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding as data, then resume STATE after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1650
static void mapping_state_2 (enum mstate state, int max_chars);

/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* Code appearing before the first explicit data implies the
	 section started with data at offset 0.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1694
1695 /* Same as mapping_state, but MAX_CHARS bytes have already been
1696 allocated. Put the mapping symbol that far back. */
1697
1698 static void
1699 mapping_state_2 (enum mstate state, int max_chars)
1700 {
1701 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1702
1703 if (!SEG_NORMAL (now_seg))
1704 return;
1705
1706 if (mapstate == state)
1707 /* The mapping symbol has already been emitted.
1708 There is nothing else to do. */
1709 return;
1710
1711 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1712 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1713 }
1714 #else
1715 #define mapping_state(x) /* nothing */
1716 #define mapping_state_2(x, y) /* nothing */
1717 #endif
1718
1719 /* Directives: sectioning and alignment. */
1720
/* Handle the .bss directive: switch output to the BSS section and
   mark the current position as data for mapping-symbol purposes.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1730
/* Handle the .even directive: align the output to a 2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1742
1743 /* Directives: Literal pools. */
1744
1745 static literal_pool *
1746 find_literal_pool (int size)
1747 {
1748 literal_pool *pool;
1749
1750 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1751 {
1752 if (pool->section == now_seg
1753 && pool->sub_section == now_subseg && pool->size == size)
1754 break;
1755 }
1756
1757 return pool;
1758 }
1759
1760 static literal_pool *
1761 find_or_make_literal_pool (int size)
1762 {
1763 /* Next literal pool ID number. */
1764 static unsigned int latest_pool_num = 1;
1765 literal_pool *pool;
1766
1767 pool = find_literal_pool (size);
1768
1769 if (pool == NULL)
1770 {
1771 /* Create a new pool. */
1772 pool = XNEW (literal_pool);
1773 if (!pool)
1774 return NULL;
1775
1776 /* Currently we always put the literal pool in the current text
1777 section. If we were generating "small" model code where we
1778 knew that all code and initialised data was within 1MB then
1779 we could output literals to mergeable, read-only data
1780 sections. */
1781
1782 pool->next_free_entry = 0;
1783 pool->section = now_seg;
1784 pool->sub_section = now_subseg;
1785 pool->size = size;
1786 pool->next = list_of_pools;
1787 pool->symbol = NULL;
1788
1789 /* Add it to the list. */
1790 list_of_pools = pool;
1791 }
1792
1793 /* New pools, and emptied pools, will have a NULL symbol. */
1794 if (pool->symbol == NULL)
1795 {
1796 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1797 &zero_address_frag, 0);
1798 pool->id = latest_pool_num++;
1799 }
1800
1801 /* Done. */
1802 return pool;
1803 }
1804
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   On return, *EXP is rewritten to refer to the pool entry (an O_symbol
   expression offset from the pool's label).
   Return TRUE on success, otherwise return FALSE.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  Constants are
     matched by value and signedness; symbolic expressions by their
     symbols and addend.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP to reference the pool slot: pool label + byte offset.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1864
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Give the previously created SYMBOLP its NAME, SEGMENT, value VALU and
   owning FRAG, and append it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its name.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1915
1916
/* Handle the .ltorg/.pool directive: dump every non-empty literal pool
   (4-byte and 8-byte and 16-byte) for the current section at the current
   position, then mark the pools as emptied so they can be refilled.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Pool entry sizes are 1 << align: 4, 8 and 16 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* \002 keeps the generated label out of the user's namespace.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Give the pool's placeholder symbol its real location now.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1975
1976 #if defined(OBJ_ELF) || defined(OBJ_COFF)
1977 /* Forward declarations for functions below, in the MD interface
1978 section. */
1979 static struct reloc_table_entry * find_reloc_table_entry (char **);
1980
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Handle .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8):
   emit a comma-separated list of NBYTES-sized data expressions,
   rejecting (for now) any :reloc_suffix: on symbolic operands.  */
static void
s_aarch64_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:suffix:" relocation specifier.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2036 #endif
2037
2038 #ifdef OBJ_ELF
2039 /* Forward declarations for functions below, in the MD interface
2040 section. */
2041 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
2042
/* Mark symbol that it follows a variant PCS convention.

   Handle the .variant_pcs directive: set STO_AARCH64_VARIANT_PCS in the
   st_other field of the named ELF symbol, creating the symbol if it
   does not exist yet.  */

static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfdsym);
  /* Only ELF symbols can carry st_other flags.  */
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
2065 #endif /* OBJ_ELF */
2066
/* Output a 32-bit word, but mark as an instruction.

   Handle the .inst directive: emit each comma-separated constant as a
   4-byte instruction word (byte-swapped on big-endian targets, since
   AArch64 instructions are always little-endian), with code mapping
   symbols and DWARF line info updated accordingly.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instructions are stored little-endian regardless of target
	 data endianness.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2123
/* Handle the .cfi_b_key_frame directive: record in the current CFI FDE
   that return addresses in this frame are signed with the B pointer
   authentication key rather than the default A key.  */
static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2131
2132 #ifdef OBJ_ELF
2133 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2134
2135 static void
2136 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2137 {
2138 expressionS exp;
2139
2140 expression (&exp);
2141 frag_grow (4);
2142 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2143 BFD_RELOC_AARCH64_TLSDESC_ADD);
2144
2145 demand_empty_rest_of_line ();
2146 }
2147
/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2167
2168 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2169
2170 static void
2171 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2172 {
2173 expressionS exp;
2174
2175 expression (&exp);
2176 frag_grow (4);
2177 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2178 BFD_RELOC_AARCH64_TLSDESC_LDR);
2179
2180 demand_empty_rest_of_line ();
2181 }
2182 #endif /* OBJ_ELF */
2183
2184 #ifdef TE_PE
/* Handle the .secrel32 directive (PE targets): emit each comma-separated
   expression as a 4-byte section-relative value.  */
static void
s_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      /* Turn plain symbol references into section-relative ones.  */
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put the terminator back into the stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2203
/* Emit SYMBOL as a SIZE-byte section-relative (secrel) value; used by
   the DWARF2 machinery on PE targets.  */
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  /* NOTE(review): only these three fields are initialized; emit_expr
     presumably reads no other expressionS fields for O_secrel — confirm.  */
  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
2214
/* Handle the .secidx directive (PE targets): emit each comma-separated
   expression as a 2-byte section-index value.  */
static void
s_secidx (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      /* Turn plain symbol references into section-index ones.  */
      if (exp.X_op == O_symbol)
	exp.X_op = O_secidx;

      emit_expr (&exp, 2);
    }
  while (*input_line_pointer++ == ',');

  /* Put the terminator back into the stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2233 #endif /* TE_PE */
2234
2235 static void s_aarch64_arch (int);
2236 static void s_aarch64_cpu (int);
2237 static void s_aarch64_arch_extension (int);
2238
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  /* TLS descriptor relocation markers.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* Data directives with relocation-suffix awareness; the integer arg
     is the size in bytes.  */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  /* Half-precision and bfloat16 floating-point constants.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2278 \f
2279
2280 /* Check whether STR points to a register name followed by a comma or the
2281 end of line; REG_TYPE indicates which register types are checked
2282 against. Return TRUE if STR is such a register name; otherwise return
2283 FALSE. The function does not intend to produce any diagnostics, but since
2284 the register parser aarch64_reg_parse, which is called by this function,
2285 does produce diagnostics, we call clear_error to clear any diagnostics
2286 that may be generated by aarch64_reg_parse.
2287 Also, the function returns FALSE directly if there is any user error
2288 present at the function entry. This prevents the existing diagnostics
2289 state from being spoiled.
2290 The function currently serves parse_constant_immediate and
2291 parse_big_immediate only. */
2292 static bool
2293 reg_name_p (char *str, aarch64_reg_type reg_type)
2294 {
2295 const reg_entry *reg;
2296
2297 /* Prevent the diagnostics state from being spoiled. */
2298 if (error_p ())
2299 return false;
2300
2301 reg = aarch64_reg_parse (&str, reg_type, NULL);
2302
2303 /* Clear the parsing error that may be set by the reg parser. */
2304 clear_error ();
2305
2306 if (!reg)
2307 return false;
2308
2309 skip_whitespace (str);
2310 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2311 return true;
2312
2313 return false;
2314 }
2315
2316 /* Parser functions used exclusively in instruction operands. */
2317
2318 /* Parse an immediate expression which may not be constant.
2319
2320 To prevent the expression parser from pushing a register name
2321 into the symbol table as an undefined symbol, firstly a check is
2322 done to find out whether STR is a register of type REG_TYPE followed
2323 by a comma or the end of line. Return FALSE if STR is such a string. */
2324
2325 static bool
2326 parse_immediate_expression (char **str, expressionS *exp,
2327 aarch64_reg_type reg_type)
2328 {
2329 if (reg_name_p (*str, reg_type))
2330 {
2331 set_recoverable_error (_("immediate operand required"));
2332 return false;
2333 }
2334
2335 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2336
2337 if (exp->X_op == O_absent)
2338 {
2339 set_fatal_syntax_error (_("missing immediate expression"));
2340 return false;
2341 }
2342
2343 return true;
2344 }
2345
2346 /* Constant immediate-value read function for use in insn parsing.
2347 STR points to the beginning of the immediate (with the optional
2348 leading #); *VAL receives the value. REG_TYPE says which register
2349 names should be treated as registers rather than as symbolic immediates.
2350
2351 Return TRUE on success; otherwise return FALSE. */
2352
2353 static bool
2354 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2355 {
2356 expressionS exp;
2357
2358 if (! parse_immediate_expression (str, &exp, reg_type))
2359 return false;
2360
2361 if (exp.X_op != O_constant)
2362 {
2363 set_syntax_error (_("constant expression required"));
2364 return false;
2365 }
2366
2367 *val = exp.X_add_number;
2368 return true;
2369 }
2370
/* Compress an IEEE single-precision bit pattern IMM into the AArch64
   8-bit floating-point immediate encoding: the sign bit and the top
   seven exponent/mantissa bits.  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t exp_mantissa = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> (31 - 7)) & 0x80;	/* b[31] -> b[7].  */
  return sign | exp_mantissa;
}
2377
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.

   Such a value has the bit pattern

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

   where n, e and each x are either 0 or 1 independently, with
   E == ~e.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* The low 19 mantissa bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 25-29 must each be the complement of bit 30.  */
  uint32_t expected = ((imm >> 30) & 0x1) ? 0x40000000 : 0x3e000000;
  return (imm & 0x7e000000) == expected;
}
2410
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.

   A convertible double has the bit pattern

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
     if Eeee_eeee != 1111_1111

   where n, e, s and S are either 0 or 1 independently and where ~ is the
   inverse of E.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  uint32_t top = imm >> 32;
  uint32_t bottom = (uint32_t) imm;

  /* The bottom 29 mantissa bits must be zero (they have no home in
     a float's 23-bit mantissa).  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* The three bits below the double's sign bit must all be the
     complement of bit 62 ('E~~~'), i.e. the exponent must be within
     float range.  */
  uint32_t expected = ((top >> 30) & 0x1) ? 0x40000000 : 0x38000000;
  if ((top & 0x78000000) != expected)
    return false;

  /* Reject exponents that would map to a float's all-ones exponent
     (infinity/NaN): Eeee_eeee == 1111_1111.  */
  if ((top & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (top & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	    | ((top << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	    | (bottom >> 29);		/* 3 S bits.  */
  return true;
}
2458
2459 /* Return true if we should treat OPERAND as a double-precision
2460 floating-point operand rather than a single-precision one. */
2461 static bool
2462 double_precision_operand_p (const aarch64_opnd_info *operand)
2463 {
2464 /* Check for unsuffixed SVE registers, which are allowed
2465 for LDR and STR but not in instructions that require an
2466 immediate. We get better error messages if we arbitrarily
2467 pick one size, parse the immediate normally, and then
2468 report the match failure in the normal way. */
2469 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2470 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2471 }
2472
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* The immediate prefix '#' is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* A double-precision pattern must be losslessly convertible
	     to a float for *IMMED to be meaningful.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* Single-precision patterns must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* Reject a bare register name where an immediate is expected.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal notation: let atof_ieee encode it as single precision.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP);
	 assemble it from the emitted littlenums, most significant
	 first.  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2548
2549 /* Less-generic immediate-value read function with the possibility of loading
2550 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2551 instructions.
2552
2553 To prevent the expression parser from pushing a register name into the
2554 symbol table as an undefined symbol, a check is firstly done to find
2555 out whether STR is a register of type REG_TYPE followed by a comma or
2556 the end of line. Return FALSE if STR is such a register. */
2557
2558 static bool
2559 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2560 {
2561 char *ptr = *str;
2562
2563 if (reg_name_p (ptr, reg_type))
2564 {
2565 set_syntax_error (_("immediate operand required"));
2566 return false;
2567 }
2568
2569 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);
2570
2571 if (inst.reloc.exp.X_op == O_constant)
2572 *imm = inst.reloc.exp.X_add_number;
2573
2574 *str = ptr;
2575
2576 return true;
2577 }
2578
2579 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2580 if NEED_LIBOPCODES is non-zero, the fixup will need
2581 assistance from the libopcodes. */
2582
2583 static inline void
2584 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2585 const aarch64_opnd_info *operand,
2586 int need_libopcodes_p)
2587 {
2588 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2589 reloc->opnd = operand->type;
2590 if (need_libopcodes_p)
2591 reloc->need_libopcodes_p = 1;
2592 };
2593
2594 /* Return TRUE if the instruction needs to be fixed up later internally by
2595 the GAS; otherwise return FALSE. */
2596
2597 static inline bool
2598 aarch64_gas_internal_fixup_p (void)
2599 {
2600 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2601 }
2602
2603 /* Assign the immediate value to the relevant field in *OPERAND if
2604 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2605 needs an internal fixup in a later stage.
2606 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2607 IMM.VALUE that may get assigned with the constant. */
2608 static inline void
2609 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2610 aarch64_opnd_info *operand,
2611 int addr_off_p,
2612 int need_libopcodes_p,
2613 int skip_p)
2614 {
2615 if (reloc->exp.X_op == O_constant)
2616 {
2617 if (addr_off_p)
2618 operand->addr.offset.imm = reloc->exp.X_add_number;
2619 else
2620 operand->imm.value = reloc->exp.X_add_number;
2621 reloc->type = BFD_RELOC_UNUSED;
2622 }
2623 else
2624 {
2625 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2626 /* Tell libopcodes to ignore this operand or not. This is helpful
2627 when one of the operands needs to be fixed up later but we need
2628 libopcodes to check the other operands. */
2629 operand->skip = skip_p;
2630 }
2631 }
2632
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;			/* Modifier text, without colons.  */
  int pc_rel;				/* Non-zero if pc-relative.  */
  bfd_reloc_code_real_type adr_type;	/* Reloc when used with ADR.  */
  bfd_reloc_code_real_type adrp_type;	/* Reloc when used with ADRP.  */
  bfd_reloc_code_real_type movw_type;	/* Reloc when used with MOVZ/N/K.  */
  bfd_reloc_code_real_type add_type;	/* Reloc when used with ADD imm.  */
  bfd_reloc_code_real_type ldst_type;	/* Reloc when used with LDR/STR.  */
  bfd_reloc_code_real_type ld_literal_type; /* Reloc for LDR (literal).  */
};
2652
2653 static struct reloc_table_entry reloc_table[] =
2654 {
2655 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2656 {"lo12", 0,
2657 0, /* adr_type */
2658 0,
2659 0,
2660 BFD_RELOC_AARCH64_ADD_LO12,
2661 BFD_RELOC_AARCH64_LDST_LO12,
2662 0},
2663
2664 /* Higher 21 bits of pc-relative page offset: ADRP */
2665 {"pg_hi21", 1,
2666 0, /* adr_type */
2667 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2668 0,
2669 0,
2670 0,
2671 0},
2672
2673 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2674 {"pg_hi21_nc", 1,
2675 0, /* adr_type */
2676 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2677 0,
2678 0,
2679 0,
2680 0},
2681
2682 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2683 {"abs_g0", 0,
2684 0, /* adr_type */
2685 0,
2686 BFD_RELOC_AARCH64_MOVW_G0,
2687 0,
2688 0,
2689 0},
2690
2691 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2692 {"abs_g0_s", 0,
2693 0, /* adr_type */
2694 0,
2695 BFD_RELOC_AARCH64_MOVW_G0_S,
2696 0,
2697 0,
2698 0},
2699
2700 /* Less significant bits 0-15 of address/value: MOVK, no check */
2701 {"abs_g0_nc", 0,
2702 0, /* adr_type */
2703 0,
2704 BFD_RELOC_AARCH64_MOVW_G0_NC,
2705 0,
2706 0,
2707 0},
2708
2709 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2710 {"abs_g1", 0,
2711 0, /* adr_type */
2712 0,
2713 BFD_RELOC_AARCH64_MOVW_G1,
2714 0,
2715 0,
2716 0},
2717
2718 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2719 {"abs_g1_s", 0,
2720 0, /* adr_type */
2721 0,
2722 BFD_RELOC_AARCH64_MOVW_G1_S,
2723 0,
2724 0,
2725 0},
2726
2727 /* Less significant bits 16-31 of address/value: MOVK, no check */
2728 {"abs_g1_nc", 0,
2729 0, /* adr_type */
2730 0,
2731 BFD_RELOC_AARCH64_MOVW_G1_NC,
2732 0,
2733 0,
2734 0},
2735
2736 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2737 {"abs_g2", 0,
2738 0, /* adr_type */
2739 0,
2740 BFD_RELOC_AARCH64_MOVW_G2,
2741 0,
2742 0,
2743 0},
2744
2745 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2746 {"abs_g2_s", 0,
2747 0, /* adr_type */
2748 0,
2749 BFD_RELOC_AARCH64_MOVW_G2_S,
2750 0,
2751 0,
2752 0},
2753
2754 /* Less significant bits 32-47 of address/value: MOVK, no check */
2755 {"abs_g2_nc", 0,
2756 0, /* adr_type */
2757 0,
2758 BFD_RELOC_AARCH64_MOVW_G2_NC,
2759 0,
2760 0,
2761 0},
2762
2763 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2764 {"abs_g3", 0,
2765 0, /* adr_type */
2766 0,
2767 BFD_RELOC_AARCH64_MOVW_G3,
2768 0,
2769 0,
2770 0},
2771
2772 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2773 {"prel_g0", 1,
2774 0, /* adr_type */
2775 0,
2776 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2777 0,
2778 0,
2779 0},
2780
2781 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2782 {"prel_g0_nc", 1,
2783 0, /* adr_type */
2784 0,
2785 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2786 0,
2787 0,
2788 0},
2789
2790 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2791 {"prel_g1", 1,
2792 0, /* adr_type */
2793 0,
2794 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2795 0,
2796 0,
2797 0},
2798
2799 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2800 {"prel_g1_nc", 1,
2801 0, /* adr_type */
2802 0,
2803 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2804 0,
2805 0,
2806 0},
2807
2808 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2809 {"prel_g2", 1,
2810 0, /* adr_type */
2811 0,
2812 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2813 0,
2814 0,
2815 0},
2816
2817 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2818 {"prel_g2_nc", 1,
2819 0, /* adr_type */
2820 0,
2821 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2822 0,
2823 0,
2824 0},
2825
2826 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2827 {"prel_g3", 1,
2828 0, /* adr_type */
2829 0,
2830 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2831 0,
2832 0,
2833 0},
2834
2835 /* Get to the page containing GOT entry for a symbol. */
2836 {"got", 1,
2837 0, /* adr_type */
2838 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2839 0,
2840 0,
2841 0,
2842 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2843
2844 /* 12 bit offset into the page containing GOT entry for that symbol. */
2845 {"got_lo12", 0,
2846 0, /* adr_type */
2847 0,
2848 0,
2849 0,
2850 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2851 0},
2852
2853 /* 0-15 bits of address/value: MOVk, no check. */
2854 {"gotoff_g0_nc", 0,
2855 0, /* adr_type */
2856 0,
2857 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2858 0,
2859 0,
2860 0},
2861
2862 /* Most significant bits 16-31 of address/value: MOVZ. */
2863 {"gotoff_g1", 0,
2864 0, /* adr_type */
2865 0,
2866 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2867 0,
2868 0,
2869 0},
2870
2871 /* 15 bit offset into the page containing GOT entry for that symbol. */
2872 {"gotoff_lo15", 0,
2873 0, /* adr_type */
2874 0,
2875 0,
2876 0,
2877 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2878 0},
2879
2880 /* Get to the page containing GOT TLS entry for a symbol */
2881 {"gottprel_g0_nc", 0,
2882 0, /* adr_type */
2883 0,
2884 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2885 0,
2886 0,
2887 0},
2888
2889 /* Get to the page containing GOT TLS entry for a symbol */
2890 {"gottprel_g1", 0,
2891 0, /* adr_type */
2892 0,
2893 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2894 0,
2895 0,
2896 0},
2897
2898 /* Get to the page containing GOT TLS entry for a symbol */
2899 {"tlsgd", 0,
2900 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2901 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2902 0,
2903 0,
2904 0,
2905 0},
2906
2907 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2908 {"tlsgd_lo12", 0,
2909 0, /* adr_type */
2910 0,
2911 0,
2912 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2913 0,
2914 0},
2915
2916 /* Lower 16 bits address/value: MOVk. */
2917 {"tlsgd_g0_nc", 0,
2918 0, /* adr_type */
2919 0,
2920 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2921 0,
2922 0,
2923 0},
2924
2925 /* Most significant bits 16-31 of address/value: MOVZ. */
2926 {"tlsgd_g1", 0,
2927 0, /* adr_type */
2928 0,
2929 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2930 0,
2931 0,
2932 0},
2933
2934 /* Get to the page containing GOT TLS entry for a symbol */
2935 {"tlsdesc", 0,
2936 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2937 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2938 0,
2939 0,
2940 0,
2941 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2942
2943 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2944 {"tlsdesc_lo12", 0,
2945 0, /* adr_type */
2946 0,
2947 0,
2948 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2949 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2950 0},
2951
2952 /* Get to the page containing GOT TLS entry for a symbol.
2953 The same as GD, we allocate two consecutive GOT slots
2954 for module index and module offset, the only difference
2955 with GD is the module offset should be initialized to
2956 zero without any outstanding runtime relocation. */
2957 {"tlsldm", 0,
2958 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2959 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2960 0,
2961 0,
2962 0,
2963 0},
2964
2965 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2966 {"tlsldm_lo12_nc", 0,
2967 0, /* adr_type */
2968 0,
2969 0,
2970 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2971 0,
2972 0},
2973
2974 /* 12 bit offset into the module TLS base address. */
2975 {"dtprel_lo12", 0,
2976 0, /* adr_type */
2977 0,
2978 0,
2979 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2980 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2981 0},
2982
2983 /* Same as dtprel_lo12, no overflow check. */
2984 {"dtprel_lo12_nc", 0,
2985 0, /* adr_type */
2986 0,
2987 0,
2988 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2989 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2990 0},
2991
2992 /* bits[23:12] of offset to the module TLS base address. */
2993 {"dtprel_hi12", 0,
2994 0, /* adr_type */
2995 0,
2996 0,
2997 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2998 0,
2999 0},
3000
3001 /* bits[15:0] of offset to the module TLS base address. */
3002 {"dtprel_g0", 0,
3003 0, /* adr_type */
3004 0,
3005 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
3006 0,
3007 0,
3008 0},
3009
3010 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
3011 {"dtprel_g0_nc", 0,
3012 0, /* adr_type */
3013 0,
3014 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
3015 0,
3016 0,
3017 0},
3018
3019 /* bits[31:16] of offset to the module TLS base address. */
3020 {"dtprel_g1", 0,
3021 0, /* adr_type */
3022 0,
3023 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
3024 0,
3025 0,
3026 0},
3027
3028 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
3029 {"dtprel_g1_nc", 0,
3030 0, /* adr_type */
3031 0,
3032 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
3033 0,
3034 0,
3035 0},
3036
3037 /* bits[47:32] of offset to the module TLS base address. */
3038 {"dtprel_g2", 0,
3039 0, /* adr_type */
3040 0,
3041 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
3042 0,
3043 0,
3044 0},
3045
3046 /* Lower 16 bit offset into GOT entry for a symbol */
3047 {"tlsdesc_off_g0_nc", 0,
3048 0, /* adr_type */
3049 0,
3050 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
3051 0,
3052 0,
3053 0},
3054
3055 /* Higher 16 bit offset into GOT entry for a symbol */
3056 {"tlsdesc_off_g1", 0,
3057 0, /* adr_type */
3058 0,
3059 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
3060 0,
3061 0,
3062 0},
3063
3064 /* Get to the page containing GOT TLS entry for a symbol */
3065 {"gottprel", 0,
3066 0, /* adr_type */
3067 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
3068 0,
3069 0,
3070 0,
3071 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
3072
3073 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3074 {"gottprel_lo12", 0,
3075 0, /* adr_type */
3076 0,
3077 0,
3078 0,
3079 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
3080 0},
3081
3082 /* Get tp offset for a symbol. */
3083 {"tprel", 0,
3084 0, /* adr_type */
3085 0,
3086 0,
3087 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3088 0,
3089 0},
3090
3091 /* Get tp offset for a symbol. */
3092 {"tprel_lo12", 0,
3093 0, /* adr_type */
3094 0,
3095 0,
3096 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3097 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
3098 0},
3099
3100 /* Get tp offset for a symbol. */
3101 {"tprel_hi12", 0,
3102 0, /* adr_type */
3103 0,
3104 0,
3105 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
3106 0,
3107 0},
3108
3109 /* Get tp offset for a symbol. */
3110 {"tprel_lo12_nc", 0,
3111 0, /* adr_type */
3112 0,
3113 0,
3114 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
3115 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
3116 0},
3117
3118 /* Most significant bits 32-47 of address/value: MOVZ. */
3119 {"tprel_g2", 0,
3120 0, /* adr_type */
3121 0,
3122 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
3123 0,
3124 0,
3125 0},
3126
3127 /* Most significant bits 16-31 of address/value: MOVZ. */
3128 {"tprel_g1", 0,
3129 0, /* adr_type */
3130 0,
3131 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3132 0,
3133 0,
3134 0},
3135
3136 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
3137 {"tprel_g1_nc", 0,
3138 0, /* adr_type */
3139 0,
3140 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3141 0,
3142 0,
3143 0},
3144
3145 /* Most significant bits 0-15 of address/value: MOVZ. */
3146 {"tprel_g0", 0,
3147 0, /* adr_type */
3148 0,
3149 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3150 0,
3151 0,
3152 0},
3153
3154 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3155 {"tprel_g0_nc", 0,
3156 0, /* adr_type */
3157 0,
3158 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3159 0,
3160 0,
3161 0},
3162
3163 /* 15bit offset from got entry to base address of GOT table. */
3164 {"gotpage_lo15", 0,
3165 0,
3166 0,
3167 0,
3168 0,
3169 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3170 0},
3171
3172 /* 14bit offset from got entry to base address of GOT table. */
3173 {"gotpage_lo14", 0,
3174 0,
3175 0,
3176 0,
3177 0,
3178 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3179 0},
3180 };
3181
3182 /* Given the address of a pointer pointing to the textual name of a
3183 relocation as may appear in assembler source, attempt to find its
3184 details in reloc_table. The pointer will be updated to the character
3185 after the trailing colon. On failure, NULL will be returned;
3186 otherwise return the reloc_table_entry. */
3187
3188 static struct reloc_table_entry *
3189 find_reloc_table_entry (char **str)
3190 {
3191 unsigned int i;
3192 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3193 {
3194 int length = strlen (reloc_table[i].name);
3195
3196 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3197 && (*str)[length] == ':')
3198 {
3199 *str += (length + 1);
3200 return &reloc_table[i];
3201 }
3202 }
3203
3204 return NULL;
3205 }
3206
/* Classify relocation TYPE for aarch64_force_relocation.
   Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK (the generic decision applies).  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      return -1;
    }
}
3309
3310 int
3311 aarch64_force_relocation (struct fix *fixp)
3312 {
3313 int res = aarch64_force_reloc (fixp->fx_r_type);
3314
3315 if (res == -1)
3316 return generic_force_reloc (fixp);
3317 return res;
3318 }
3319
/* Mode argument to parse_shift and parser_shifter_operand: selects which
   shift/extend operators are acceptable in the current operand context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3334
3335 /* Parse a <shift> operator on an AArch64 data processing instruction.
3336 Return TRUE on success; otherwise return FALSE. */
3337 static bool
3338 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3339 {
3340 const struct aarch64_name_value_pair *shift_op;
3341 enum aarch64_modifier_kind kind;
3342 expressionS exp;
3343 int exp_has_prefix;
3344 char *s = *str;
3345 char *p = s;
3346
3347 for (p = *str; ISALPHA (*p); p++)
3348 ;
3349
3350 if (p == *str)
3351 {
3352 set_syntax_error (_("shift expression expected"));
3353 return false;
3354 }
3355
3356 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3357
3358 if (shift_op == NULL)
3359 {
3360 set_syntax_error (_("shift operator expected"));
3361 return false;
3362 }
3363
3364 kind = aarch64_get_operand_modifier (shift_op);
3365
3366 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3367 {
3368 set_syntax_error (_("invalid use of 'MSL'"));
3369 return false;
3370 }
3371
3372 if (kind == AARCH64_MOD_MUL
3373 && mode != SHIFTED_MUL
3374 && mode != SHIFTED_MUL_VL)
3375 {
3376 set_syntax_error (_("invalid use of 'MUL'"));
3377 return false;
3378 }
3379
3380 switch (mode)
3381 {
3382 case SHIFTED_LOGIC_IMM:
3383 if (aarch64_extend_operator_p (kind))
3384 {
3385 set_syntax_error (_("extending shift is not permitted"));
3386 return false;
3387 }
3388 break;
3389
3390 case SHIFTED_ARITH_IMM:
3391 if (kind == AARCH64_MOD_ROR)
3392 {
3393 set_syntax_error (_("'ROR' shift is not permitted"));
3394 return false;
3395 }
3396 break;
3397
3398 case SHIFTED_LSL:
3399 if (kind != AARCH64_MOD_LSL)
3400 {
3401 set_syntax_error (_("only 'LSL' shift is permitted"));
3402 return false;
3403 }
3404 break;
3405
3406 case SHIFTED_MUL:
3407 if (kind != AARCH64_MOD_MUL)
3408 {
3409 set_syntax_error (_("only 'MUL' is permitted"));
3410 return false;
3411 }
3412 break;
3413
3414 case SHIFTED_MUL_VL:
3415 /* "MUL VL" consists of two separate tokens. Require the first
3416 token to be "MUL" and look for a following "VL". */
3417 if (kind == AARCH64_MOD_MUL)
3418 {
3419 skip_whitespace (p);
3420 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3421 {
3422 p += 2;
3423 kind = AARCH64_MOD_MUL_VL;
3424 break;
3425 }
3426 }
3427 set_syntax_error (_("only 'MUL VL' is permitted"));
3428 return false;
3429
3430 case SHIFTED_REG_OFFSET:
3431 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3432 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3433 {
3434 set_fatal_syntax_error
3435 (_("invalid shift for the register offset addressing mode"));
3436 return false;
3437 }
3438 break;
3439
3440 case SHIFTED_LSL_MSL:
3441 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3442 {
3443 set_syntax_error (_("invalid shift operator"));
3444 return false;
3445 }
3446 break;
3447
3448 default:
3449 abort ();
3450 }
3451
3452 /* Whitespace can appear here if the next thing is a bare digit. */
3453 skip_whitespace (p);
3454
3455 /* Parse shift amount. */
3456 exp_has_prefix = 0;
3457 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3458 exp.X_op = O_absent;
3459 else
3460 {
3461 if (is_immediate_prefix (*p))
3462 {
3463 p++;
3464 exp_has_prefix = 1;
3465 }
3466 aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
3467 }
3468 if (kind == AARCH64_MOD_MUL_VL)
3469 /* For consistency, give MUL VL the same shift amount as an implicit
3470 MUL #1. */
3471 operand->shifter.amount = 1;
3472 else if (exp.X_op == O_absent)
3473 {
3474 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3475 {
3476 set_syntax_error (_("missing shift amount"));
3477 return false;
3478 }
3479 operand->shifter.amount = 0;
3480 }
3481 else if (exp.X_op != O_constant)
3482 {
3483 set_syntax_error (_("constant shift amount required"));
3484 return false;
3485 }
3486 /* For parsing purposes, MUL #n has no inherent range. The range
3487 depends on the operand and will be checked by operand-specific
3488 routines. */
3489 else if (kind != AARCH64_MOD_MUL
3490 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3491 {
3492 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3493 return false;
3494 }
3495 else
3496 {
3497 operand->shifter.amount = exp.X_add_number;
3498 operand->shifter.amount_present = 1;
3499 }
3500
3501 operand->shifter.operator_present = 1;
3502 operand->shifter.kind = kind;
3503
3504 *str = p;
3505 return true;
3506 }
3507
3508 /* Parse a <shifter_operand> for a data processing instruction:
3509
3510 #<immediate>
3511 #<immediate>, LSL #imm
3512
3513 Validation of immediate operands is deferred to md_apply_fix.
3514
3515 Return TRUE on success; otherwise return FALSE. */
3516
3517 static bool
3518 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3519 enum parse_shift_mode mode)
3520 {
3521 char *p;
3522
3523 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3524 return false;
3525
3526 p = *str;
3527
3528 /* Accept an immediate expression. */
3529 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
3530 REJECT_ABSENT))
3531 return false;
3532
3533 /* Accept optional LSL for arithmetic immediate values. */
3534 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3535 if (! parse_shift (&p, operand, SHIFTED_LSL))
3536 return false;
3537
3538 /* Not accept any shifter for logical immediate values. */
3539 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3540 && parse_shift (&p, operand, mode))
3541 {
3542 set_syntax_error (_("unexpected shift operator"));
3543 return false;
3544 }
3545
3546 *str = p;
3547 return true;
3548 }
3549
3550 /* Parse a <shifter_operand> for a data processing instruction:
3551
3552 <Rm>
3553 <Rm>, <shift>
3554 #<immediate>
3555 #<immediate>, LSL #imm
3556
3557 where <shift> is handled by parse_shift above, and the last two
3558 cases are handled by the function above.
3559
3560 Validation of immediate operands is deferred to md_apply_fix.
3561
3562 Return TRUE on success; otherwise return FALSE. */
3563
3564 static bool
3565 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3566 enum parse_shift_mode mode)
3567 {
3568 const reg_entry *reg;
3569 aarch64_opnd_qualifier_t qualifier;
3570 enum aarch64_operand_class opd_class
3571 = aarch64_get_operand_class (operand->type);
3572
3573 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3574 if (reg)
3575 {
3576 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3577 {
3578 set_syntax_error (_("unexpected register in the immediate operand"));
3579 return false;
3580 }
3581
3582 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3583 {
3584 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3585 return false;
3586 }
3587
3588 operand->reg.regno = reg->number;
3589 operand->qualifier = qualifier;
3590
3591 /* Accept optional shift operation on register. */
3592 if (! skip_past_comma (str))
3593 return true;
3594
3595 if (! parse_shift (str, operand, mode))
3596 return false;
3597
3598 return true;
3599 }
3600 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3601 {
3602 set_syntax_error
3603 (_("integer register expected in the extended/shifted operand "
3604 "register"));
3605 return false;
3606 }
3607
3608 /* We have a shifted immediate variable. */
3609 return parse_shifter_operand_imm (str, operand, mode);
3610 }
3611
/* Parse a <shifter_operand> that may be preceded by a relocation
   modifier of the form "#:relop:" or ":relop:".  If a modifier is
   present, record its ADD-class relocation type in inst.reloc and
   parse the following expression; if no modifier is present, defer
   to parse_shifter_operand.  Return TRUE on success; return FALSE
   otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step over the "#:" (or bare ":") prefix.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3673
3674 /* Parse all forms of an address expression. Information is written
3675 to *OPERAND and/or inst.reloc.
3676
3677 The A64 instruction set has the following addressing modes:
3678
3679 Offset
3680 [base] // in SIMD ld/st structure
3681 [base{,#0}] // in ld/st exclusive
3682 [base{,#imm}]
3683 [base,Xm{,LSL #imm}]
3684 [base,Xm,SXTX {#imm}]
3685 [base,Wm,(S|U)XTW {#imm}]
3686 Pre-indexed
3687 [base]! // in ldraa/ldrab exclusive
3688 [base,#imm]!
3689 Post-indexed
3690 [base],#imm
3691 [base],Xm // in SIMD ld/st structure
3692 PC-relative (literal)
3693 label
3694 SVE:
3695 [base,#imm,MUL VL]
3696 [base,Zm.D{,LSL #imm}]
3697 [base,Zm.S,(S|U)XTW {#imm}]
3698 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3699 [Zn.S,#imm]
3700 [Zn.D,#imm]
3701 [Zn.S{, Xm}]
3702 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3703 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3704 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3705
3706 (As a convenience, the notation "=immediate" is permitted in conjunction
3707 with the pc-relative literal load instructions to automatically place an
3708 immediate value or symbolic address in a nearby literal pool and generate
3709 a hidden label which references it.)
3710
3711 Upon a successful parsing, the address structure in *OPERAND will be
3712 filled in the following way:
3713
3714 .base_regno = <base>
3715 .offset.is_reg // 1 if the offset is a register
3716 .offset.imm = <imm>
3717 .offset.regno = <Rm>
3718
3719 For different addressing modes defined in the A64 ISA:
3720
3721 Offset
3722 .pcrel=0; .preind=1; .postind=0; .writeback=0
3723 Pre-indexed
3724 .pcrel=0; .preind=1; .postind=0; .writeback=1
3725 Post-indexed
3726 .pcrel=0; .preind=0; .postind=1; .writeback=1
3727 PC-relative (literal)
3728 .pcrel=1; .preind=1; .postind=0; .writeback=0
3729
3730 The shift/extension information, if any, will be stored in .shifter.
3731 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3732 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3733 corresponding register.
3734
3735 BASE_TYPE says which types of base register should be accepted and
3736 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3737 is the type of shifter that is allowed for immediate offsets,
3738 or SHIFTED_NONE if none.
3739
3740 In all other respects, it is the caller's responsibility to check
3741 for addressing modes not supported by the instruction, and to set
3742 inst.reloc.type. */
3743
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  /* Immediate offsets and literal addresses are parsed directly into
     the instruction's relocation expression.  */
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* ADR uses the adr_type variant; everything else here is a
	     load-literal.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Parse the base register; its type (and for SVE, qualifier) must
     match BASE_TYPE.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* LSL/SXTX (or no shift) require a 64-bit offset register.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* Base and offset element sizes must agree, except for the
		 SVE2 vector-plus-scalar form [Zn.S, Xm].  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* UXTW/SXTW extends take a 32-bit offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      /* A register offset cannot be combined with pre-index writeback.  */
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!  */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!  */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
4042
4043 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4044 on success. */
4045 static bool
4046 parse_address (char **str, aarch64_opnd_info *operand)
4047 {
4048 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4049 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4050 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
4051 }
4052
4053 /* Parse an address in which SVE vector registers and MUL VL are allowed.
4054 The arguments have the same meaning as for parse_address_main.
4055 Return TRUE on success. */
4056 static bool
4057 parse_sve_address (char **str, aarch64_opnd_info *operand,
4058 aarch64_opnd_qualifier_t *base_qualifier,
4059 aarch64_opnd_qualifier_t *offset_qualifier)
4060 {
4061 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
4062 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
4063 SHIFTED_MUL_VL);
4064 }
4065
4066 /* Parse a register X0-X30. The register must be 64-bit and register 31
4067 is unallocated. */
4068 static bool
4069 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4070 {
4071 const reg_entry *reg = parse_reg (str);
4072 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4073 {
4074 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
4075 return false;
4076 }
4077 operand->reg.regno = reg->number;
4078 operand->qualifier = AARCH64_OPND_QLF_X;
4079 return true;
4080 }
4081
4082 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
4083 Return TRUE on success; otherwise return FALSE. */
4084 static bool
4085 parse_half (char **str, int *internal_fixup_p)
4086 {
4087 char *p = *str;
4088
4089 skip_past_char (&p, '#');
4090
4091 gas_assert (internal_fixup_p);
4092 *internal_fixup_p = 0;
4093
4094 if (*p == ':')
4095 {
4096 struct reloc_table_entry *entry;
4097
4098 /* Try to parse a relocation. Anything else is an error. */
4099 ++p;
4100
4101 if (!(entry = find_reloc_table_entry (&p)))
4102 {
4103 set_syntax_error (_("unknown relocation modifier"));
4104 return false;
4105 }
4106
4107 if (entry->movw_type == 0)
4108 {
4109 set_syntax_error
4110 (_("this relocation modifier is not allowed on this instruction"));
4111 return false;
4112 }
4113
4114 inst.reloc.type = entry->movw_type;
4115 }
4116 else
4117 *internal_fixup_p = 1;
4118
4119 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4120 return false;
4121
4122 *str = p;
4123 return true;
4124 }
4125
4126 /* Parse an operand for an ADRP instruction:
4127 ADRP <Xd>, <label>
4128 Return TRUE on success; otherwise return FALSE. */
4129
4130 static bool
4131 parse_adrp (char **str)
4132 {
4133 char *p;
4134
4135 p = *str;
4136 if (*p == ':')
4137 {
4138 struct reloc_table_entry *entry;
4139
4140 /* Try to parse a relocation. Anything else is an error. */
4141 ++p;
4142 if (!(entry = find_reloc_table_entry (&p)))
4143 {
4144 set_syntax_error (_("unknown relocation modifier"));
4145 return false;
4146 }
4147
4148 if (entry->adrp_type == 0)
4149 {
4150 set_syntax_error
4151 (_("this relocation modifier is not allowed on this instruction"));
4152 return false;
4153 }
4154
4155 inst.reloc.type = entry->adrp_type;
4156 }
4157 else
4158 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4159
4160 inst.reloc.pc_rel = 1;
4161 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4162 return false;
4163 *str = p;
4164 return true;
4165 }
4166
4167 /* Miscellaneous. */
4168
4169 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4170 of SIZE tokens in which index I gives the token for field value I,
4171 or is null if field value I is invalid. REG_TYPE says which register
4172 names should be treated as registers rather than as symbolic immediates.
4173
4174 Return true on success, moving *STR past the operand and storing the
4175 field value in *VAL. */
4176
4177 static int
4178 parse_enum_string (char **str, int64_t *val, const char *const *array,
4179 size_t size, aarch64_reg_type reg_type)
4180 {
4181 expressionS exp;
4182 char *p, *q;
4183 size_t i;
4184
4185 /* Match C-like tokens. */
4186 p = q = *str;
4187 while (ISALNUM (*q))
4188 q++;
4189
4190 for (i = 0; i < size; ++i)
4191 if (array[i]
4192 && strncasecmp (array[i], p, q - p) == 0
4193 && array[i][q - p] == 0)
4194 {
4195 *val = i;
4196 *str = q;
4197 return true;
4198 }
4199
4200 if (!parse_immediate_expression (&p, &exp, reg_type))
4201 return false;
4202
4203 if (exp.X_op == O_constant
4204 && (uint64_t) exp.X_add_number < size)
4205 {
4206 *val = exp.X_add_number;
4207 *str = p;
4208 return true;
4209 }
4210
4211 /* Use the default error for this operand. */
4212 return false;
4213 }
4214
4215 /* Parse an option for a preload instruction. Returns the encoding for the
4216 option, or PARSE_FAIL. */
4217
4218 static int
4219 parse_pldop (char **str)
4220 {
4221 char *p, *q;
4222 const struct aarch64_name_value_pair *o;
4223
4224 p = q = *str;
4225 while (ISALNUM (*q))
4226 q++;
4227
4228 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4229 if (!o)
4230 return PARSE_FAIL;
4231
4232 *str = q;
4233 return o->value;
4234 }
4235
4236 /* Parse an option for a barrier instruction. Returns the encoding for the
4237 option, or PARSE_FAIL. */
4238
4239 static int
4240 parse_barrier (char **str)
4241 {
4242 char *p, *q;
4243 const struct aarch64_name_value_pair *o;
4244
4245 p = q = *str;
4246 while (ISALPHA (*q))
4247 q++;
4248
4249 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4250 if (!o)
4251 return PARSE_FAIL;
4252
4253 *str = q;
4254 return o->value;
4255 }
4256
4257 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4258 return 0 if successful. Otherwise return PARSE_FAIL. */
4259
4260 static int
4261 parse_barrier_psb (char **str,
4262 const struct aarch64_name_value_pair ** hint_opt)
4263 {
4264 char *p, *q;
4265 const struct aarch64_name_value_pair *o;
4266
4267 p = q = *str;
4268 while (ISALPHA (*q))
4269 q++;
4270
4271 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4272 if (!o)
4273 {
4274 set_fatal_syntax_error
4275 ( _("unknown or missing option to PSB/TSB"));
4276 return PARSE_FAIL;
4277 }
4278
4279 if (o->value != 0x11)
4280 {
4281 /* PSB only accepts option name 'CSYNC'. */
4282 set_syntax_error
4283 (_("the specified option is not accepted for PSB/TSB"));
4284 return PARSE_FAIL;
4285 }
4286
4287 *str = q;
4288 *hint_opt = o;
4289 return 0;
4290 }
4291
4292 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4293 return 0 if successful. Otherwise return PARSE_FAIL. */
4294
4295 static int
4296 parse_bti_operand (char **str,
4297 const struct aarch64_name_value_pair ** hint_opt)
4298 {
4299 char *p, *q;
4300 const struct aarch64_name_value_pair *o;
4301
4302 p = q = *str;
4303 while (ISALPHA (*q))
4304 q++;
4305
4306 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4307 if (!o)
4308 {
4309 set_fatal_syntax_error
4310 ( _("unknown option to BTI"));
4311 return PARSE_FAIL;
4312 }
4313
4314 switch (o->value)
4315 {
4316 /* Valid BTI operands. */
4317 case HINT_OPD_C:
4318 case HINT_OPD_J:
4319 case HINT_OPD_JC:
4320 break;
4321
4322 default:
4323 set_syntax_error
4324 (_("unknown option to BTI"));
4325 return PARSE_FAIL;
4326 }
4327
4328 *str = q;
4329 *hint_opt = o;
4330 return 0;
4331 }
4332
4333 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4334 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4335 on failure. Format:
4336
4337 REG_TYPE.QUALIFIER
4338
4339 Side effect: Update STR with current parse position of success.
4340 */
4341
4342 static const reg_entry *
4343 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4344 aarch64_opnd_qualifier_t *qualifier)
4345 {
4346 struct vector_type_el vectype;
4347 const reg_entry *reg = parse_typed_reg (str, reg_type, &vectype,
4348 PTR_FULL_REG);
4349 if (!reg)
4350 return NULL;
4351
4352 *qualifier = vectype_to_qualifier (&vectype);
4353 if (*qualifier == AARCH64_OPND_QLF_NIL)
4354 return NULL;
4355
4356 return reg;
4357 }
4358
4359 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4360
4361 #<imm>
4362 <imm>
4363
4364 Function return TRUE if immediate was found, or FALSE.
4365 */
4366 static bool
4367 parse_sme_immediate (char **str, int64_t *imm)
4368 {
4369 int64_t val;
4370 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4371 return false;
4372
4373 *imm = val;
4374 return true;
4375 }
4376
4377 /* Parse index with vector select register and immediate:
4378
4379 [<Wv>, <imm>]
4380 [<Wv>, #<imm>]
4381 where <Wv> is in W12-W15 range and # is optional for immediate.
4382
4383 Function performs extra check for mandatory immediate value if REQUIRE_IMM
4384 is set to true.
4385
4386 On success function returns TRUE and populated VECTOR_SELECT_REGISTER and
4387 IMM output.
4388 */
4389 static bool
4390 parse_sme_za_hv_tiles_operand_index (char **str,
4391 int *vector_select_register,
4392 int64_t *imm)
4393 {
4394 const reg_entry *reg;
4395
4396 if (!skip_past_char (str, '['))
4397 {
4398 set_syntax_error (_("expected '['"));
4399 return false;
4400 }
4401
4402 /* Vector select register W12-W15 encoded in the 2-bit Rv field. */
4403 reg = parse_reg (str);
4404 if (reg == NULL || reg->type != REG_TYPE_R_32
4405 || reg->number < 12 || reg->number > 15)
4406 {
4407 set_syntax_error (_("expected vector select register W12-W15"));
4408 return false;
4409 }
4410 *vector_select_register = reg->number;
4411
4412 if (!skip_past_char (str, ',')) /* Optional index offset immediate. */
4413 {
4414 set_syntax_error (_("expected ','"));
4415 return false;
4416 }
4417
4418 if (!parse_sme_immediate (str, imm))
4419 {
4420 set_syntax_error (_("index offset immediate expected"));
4421 return false;
4422 }
4423
4424 if (!skip_past_char (str, ']'))
4425 {
4426 set_syntax_error (_("expected ']'"));
4427 return false;
4428 }
4429
4430 return true;
4431 }
4432
4433 /* Parse SME ZA horizontal or vertical vector access to tiles.
4434 Function extracts from STR to SLICE_INDICATOR <HV> horizontal (0) or
4435 vertical (1) ZA tile vector orientation. VECTOR_SELECT_REGISTER
4436 contains <Wv> select register and corresponding optional IMMEDIATE.
4437 In addition QUALIFIER is extracted.
4438
4439 Field format examples:
4440
4441 ZA0<HV>.B[<Wv>, #<imm>]
4442 <ZAn><HV>.H[<Wv>, #<imm>]
4443 <ZAn><HV>.S[<Wv>, #<imm>]
4444 <ZAn><HV>.D[<Wv>, #<imm>]
4445 <ZAn><HV>.Q[<Wv>, #<imm>]
4446
4447 Function returns <ZAda> register number or PARSE_FAIL.
4448 */
4449 static int
4450 parse_sme_za_hv_tiles_operand (char **str,
4451 enum sme_hv_slice *slice_indicator,
4452 int *vector_select_register,
4453 int *imm,
4454 aarch64_opnd_qualifier_t *qualifier)
4455 {
4456 int regno;
4457 int64_t imm_limit;
4458 int64_t imm_value;
4459 const reg_entry *reg;
4460
4461 reg = parse_reg_with_qual (str, REG_TYPE_ZATHV, qualifier);
4462 if (!reg)
4463 return PARSE_FAIL;
4464
4465 *slice_indicator = (aarch64_check_reg_type (reg, REG_TYPE_ZATH)
4466 ? HV_horizontal
4467 : HV_vertical);
4468 regno = reg->number;
4469
4470 switch (*qualifier)
4471 {
4472 case AARCH64_OPND_QLF_S_B:
4473 imm_limit = 15;
4474 break;
4475 case AARCH64_OPND_QLF_S_H:
4476 imm_limit = 7;
4477 break;
4478 case AARCH64_OPND_QLF_S_S:
4479 imm_limit = 3;
4480 break;
4481 case AARCH64_OPND_QLF_S_D:
4482 imm_limit = 1;
4483 break;
4484 case AARCH64_OPND_QLF_S_Q:
4485 imm_limit = 0;
4486 break;
4487 default:
4488 set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
4489 return PARSE_FAIL;
4490 }
4491
4492 if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
4493 &imm_value))
4494 return PARSE_FAIL;
4495
4496 /* Check if optional index offset is in the range for instruction
4497 variant. */
4498 if (imm_value < 0 || imm_value > imm_limit)
4499 {
4500 set_syntax_error (_("index offset out of range"));
4501 return PARSE_FAIL;
4502 }
4503
4504 *imm = imm_value;
4505
4506 return regno;
4507 }
4508
4509
4510 static int
4511 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4512 enum sme_hv_slice *slice_indicator,
4513 int *vector_select_register,
4514 int *imm,
4515 aarch64_opnd_qualifier_t *qualifier)
4516 {
4517 int regno;
4518
4519 if (!skip_past_char (str, '{'))
4520 {
4521 set_syntax_error (_("expected '{'"));
4522 return PARSE_FAIL;
4523 }
4524
4525 regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4526 vector_select_register, imm,
4527 qualifier);
4528
4529 if (regno == PARSE_FAIL)
4530 return PARSE_FAIL;
4531
4532 if (!skip_past_char (str, '}'))
4533 {
4534 set_syntax_error (_("expected '}'"));
4535 return PARSE_FAIL;
4536 }
4537
4538 return regno;
4539 }
4540
4541 /* Parse list of up to eight 64-bit element tile names separated by commas in
4542 SME's ZERO instruction:
4543
4544 ZERO { <mask> }
4545
4546 Function returns <mask>:
4547
4548 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4549 */
4550 static int
4551 parse_sme_zero_mask(char **str)
4552 {
4553 char *q;
4554 int mask;
4555 aarch64_opnd_qualifier_t qualifier;
4556
4557 mask = 0x00;
4558 q = *str;
4559 do
4560 {
4561 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZAT,
4562 &qualifier);
4563 if (reg)
4564 {
4565 int regno = reg->number;
4566 if (qualifier == AARCH64_OPND_QLF_S_B)
4567 {
4568 /* { ZA0.B } is assembled as all-ones immediate. */
4569 mask = 0xff;
4570 }
4571 else if (qualifier == AARCH64_OPND_QLF_S_H)
4572 mask |= 0x55 << regno;
4573 else if (qualifier == AARCH64_OPND_QLF_S_S)
4574 mask |= 0x11 << regno;
4575 else if (qualifier == AARCH64_OPND_QLF_S_D)
4576 mask |= 0x01 << regno;
4577 else
4578 {
4579 set_syntax_error (_("wrong ZA tile element format"));
4580 return PARSE_FAIL;
4581 }
4582 continue;
4583 }
4584 clear_error ();
4585 if (strncasecmp (q, "za", 2) == 0 && !ISALNUM (q[2]))
4586 {
4587 /* { ZA } is assembled as all-ones immediate. */
4588 mask = 0xff;
4589 q += 2;
4590 continue;
4591 }
4592
4593 set_syntax_error (_("wrong ZA tile element format"));
4594 return PARSE_FAIL;
4595 }
4596 while (skip_past_char (&q, ','));
4597
4598 *str = q;
4599 return mask;
4600 }
4601
4602 /* Wraps in curly braces <mask> operand ZERO instruction:
4603
4604 ZERO { <mask> }
4605
4606 Function returns value of <mask> bit-field.
4607 */
4608 static int
4609 parse_sme_list_of_64bit_tiles (char **str)
4610 {
4611 int regno;
4612
4613 if (!skip_past_char (str, '{'))
4614 {
4615 set_syntax_error (_("expected '{'"));
4616 return PARSE_FAIL;
4617 }
4618
4619 /* Empty <mask> list is an all-zeros immediate. */
4620 if (!skip_past_char (str, '}'))
4621 {
4622 regno = parse_sme_zero_mask (str);
4623 if (regno == PARSE_FAIL)
4624 return PARSE_FAIL;
4625
4626 if (!skip_past_char (str, '}'))
4627 {
4628 set_syntax_error (_("expected '}'"));
4629 return PARSE_FAIL;
4630 }
4631 }
4632 else
4633 regno = 0x00;
4634
4635 return regno;
4636 }
4637
4638 /* Parse ZA array operand used in e.g. STR and LDR instruction.
4639 Operand format:
4640
4641 ZA[<Wv>, <imm>]
4642 ZA[<Wv>, #<imm>]
4643
4644 Function returns <Wv> or PARSE_FAIL.
4645 */
4646 static int
4647 parse_sme_za_array (char **str, int *imm)
4648 {
4649 char *p, *q;
4650 int regno;
4651 int64_t imm_value;
4652
4653 p = q = *str;
4654 while (ISALPHA (*q))
4655 q++;
4656
4657 if ((q - p != 2) || strncasecmp ("za", p, q - p) != 0)
4658 {
4659 set_syntax_error (_("expected ZA array"));
4660 return PARSE_FAIL;
4661 }
4662
4663 if (! parse_sme_za_hv_tiles_operand_index (&q, &regno, &imm_value))
4664 return PARSE_FAIL;
4665
4666 if (imm_value < 0 || imm_value > 15)
4667 {
4668 set_syntax_error (_("offset out of range"));
4669 return PARSE_FAIL;
4670 }
4671
4672 *imm = imm_value;
4673 *str = q;
4674 return regno;
4675 }
4676
/* Parse streaming mode operand for SMSTART and SMSTOP.

     {SM | ZA}

   Function returns 's' if SM or 'z' if ZA is parsed. Otherwise PARSE_FAIL.
*/
static int
parse_sme_sm_za (char **str)
{
  char *p, *q;

  p = q = *str;
  while (ISALPHA (*q))
    q++;

  /* The operand is exactly two letters, "sm" or "za", case-insensitive.  */
  if ((q - p != 2)
      || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
    {
      set_syntax_error (_("expected SM or ZA operand"));
      return PARSE_FAIL;
    }

  *str = q;
  /* The first letter is enough to tell the two apart.  */
  return TOLOWER (p[0]);
}
4702
4703 /* Parse the name of the source scalable predicate register, the index base
4704 register W12-W15 and the element index. Function performs element index
4705 limit checks as well as qualifier type checks.
4706
4707 <Pn>.<T>[<Wv>, <imm>]
4708 <Pn>.<T>[<Wv>, #<imm>]
4709
4710 On success function sets <Wv> to INDEX_BASE_REG, <T> to QUALIFIER and
4711 <imm> to IMM.
4712 Function returns <Pn>, or PARSE_FAIL.
4713 */
4714 static int
4715 parse_sme_pred_reg_with_index(char **str,
4716 int *index_base_reg,
4717 int *imm,
4718 aarch64_opnd_qualifier_t *qualifier)
4719 {
4720 int regno;
4721 int64_t imm_limit;
4722 int64_t imm_value;
4723 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_PN, qualifier);
4724
4725 if (reg == NULL)
4726 return PARSE_FAIL;
4727 regno = reg->number;
4728
4729 switch (*qualifier)
4730 {
4731 case AARCH64_OPND_QLF_S_B:
4732 imm_limit = 15;
4733 break;
4734 case AARCH64_OPND_QLF_S_H:
4735 imm_limit = 7;
4736 break;
4737 case AARCH64_OPND_QLF_S_S:
4738 imm_limit = 3;
4739 break;
4740 case AARCH64_OPND_QLF_S_D:
4741 imm_limit = 1;
4742 break;
4743 default:
4744 set_syntax_error (_("wrong predicate register element size, allowed b, h, s and d"));
4745 return PARSE_FAIL;
4746 }
4747
4748 if (! parse_sme_za_hv_tiles_operand_index (str, index_base_reg, &imm_value))
4749 return PARSE_FAIL;
4750
4751 if (imm_value < 0 || imm_value > imm_limit)
4752 {
4753 set_syntax_error (_("element index out of range for given variant"));
4754 return PARSE_FAIL;
4755 }
4756
4757 *imm = imm_value;
4758
4759 return regno;
4760 }
4761
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-null, the register's flag bits are stored through it on
   success.  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the candidate name into BUF, stopping
     at the first character that cannot be part of a register name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the MSR/MRS encoding:
	     op0:2 bits, op1:3, CRn:4, CRm:4, op2:3.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: diagnose unsupported/deprecated use, but still return
	 the encoding so assembly can continue.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4835
4836 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4837 for the option, or NULL. */
4838
4839 static const aarch64_sys_ins_reg *
4840 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4841 {
4842 char *p, *q;
4843 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4844 const aarch64_sys_ins_reg *o;
4845
4846 p = buf;
4847 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4848 if (p < buf + (sizeof (buf) - 1))
4849 *p++ = TOLOWER (*q);
4850 *p = '\0';
4851
4852 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4853 valid system register. This is enforced by construction of the hash
4854 table. */
4855 if (p - buf != q - *str)
4856 return NULL;
4857
4858 o = str_hash_find (sys_ins_regs, buf);
4859 if (!o)
4860 return NULL;
4861
4862 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4863 o->name, o->value, o->flags, 0))
4864 as_bad (_("selected processor does not support system register "
4865 "name '%s'"), buf);
4866 if (aarch64_sys_reg_deprecated_p (o->flags))
4867 as_warn (_("system register name '%s' is deprecated and may be "
4868 "removed in a future release"), buf);
4869
4870 *str = q;
4871 return o;
4872 }
4873 \f
/* Parse helpers used by parse_operands.  Each macro consumes input via the
   local variable STR of the enclosing function and jumps to its `failure'
   label when the expected item is not found.  */

/* Consume the single character CHR, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into REG, or fail.  */
#define po_reg_or_fail(regtype) do {				\
    reg = aarch64_reg_parse (&str, regtype, NULL);		\
    if (!reg)							\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE, storing its number
   and qualifier into INFO, or fail.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL without a range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and fail if it lies outside
   [MIN, MAX].  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse one of the strings in ARRAY, storing its index into VAL,
   or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR and fail when it is false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4925 \f
/* Encode the 12-bit immediate of an add/sub (immediate) instruction;
   the field occupies bits [21:10].  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  /* imm12 sits at bit 10.  */
  return imm << 10;
}
4932
/* Encode the shift-amount field of an add/sub (immediate) instruction;
   the field occupies bits starting at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  /* sh field sits at bit 22.  */
  return cnt << 22;
}
4939
4940
/* Encode the 21-bit immediate of an ADR instruction.  The two low bits
   go to immlo at [30:29]; the remaining 19 bits go to immhi at [23:5].  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;		/* imm[1:0] -> [30:29].  */
  uint32_t immhi = (imm >> 2) & 0x7ffff;	/* imm[20:2] -> [23:5].  */
  return (immlo << 29) | (immhi << 5);
}
4948
/* Encode the 16-bit immediate of a move-wide instruction; imm16 sits
   at bit 5.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  return imm << 5;
}
4955
/* Encode the 26-bit offset of an unconditional branch; the offset
   occupies bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x03ffffff;
}
4962
/* Encode the 19-bit offset of a conditional branch or compare-and-branch;
   the offset occupies bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4969
/* Encode the 19-bit offset of a load-literal instruction; the offset
   occupies bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4976
/* Encode the 14-bit offset of a test-and-branch instruction; the offset
   occupies bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fff) << 5;
}
4983
/* Encode the 16-bit immediate of SVC/HVC/SMC; imm16 sits at bit 5.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  return imm << 5;
}
4990
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling bit 30
   (the op bit of the add/sub encoding group).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ (1u << 30);
}
4997
/* Force a MOVZ/MOVN-family opcode to the MOVZ form by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | (1u << 30);
}
5003
/* Force a MOVZ/MOVN-family opcode to the MOVN form by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(1u << 30);
}
5009
5010 /* Overall per-instruction processing. */
5011
5012 /* We need to be able to fix up arbitrary expressions in some statements.
5013 This is so that we can handle symbols that are an arbitrary distance from
5014 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
5015 which returns part of an address in a form which will be valid for
5016 a data instruction. We do this by pushing the expression into a symbol
5017 in the expr_section, and creating a fix for that. */
5018
5019 static fixS *
5020 fix_new_aarch64 (fragS * frag,
5021 int where,
5022 short int size,
5023 expressionS * exp,
5024 int pc_rel,
5025 int reloc)
5026 {
5027 fixS *new_fix;
5028
5029 switch (exp->X_op)
5030 {
5031 case O_constant:
5032 case O_symbol:
5033 case O_add:
5034 case O_subtract:
5035 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5036 break;
5037
5038 default:
5039 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5040 pc_rel, reloc);
5041 break;
5042 }
5043 return new_fix;
5044 }
5045 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.
   The array is indexed by enum aarch64_operand_error_kind, so it must
   contain one string per enumerator, in enum order; the UNTIED_* kinds
   (both handled by output_operand_error_record below) were previously
   missing, which made DEBUG_TRACE print wrong names or read past the
   end of the array for the later kinds.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
5069
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* Severity is simply the numeric order of the enumerators; the asserts
     pin the assumed ordering so a reordering of the enum in aarch64.h is
     caught here rather than producing silently wrong diagnostics.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
5093
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full mnemonic is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5122
5123 static void
5124 reset_aarch64_instruction (aarch64_instruction *instruction)
5125 {
5126 memset (instruction, '\0', sizeof (aarch64_instruction));
5127 instruction->reloc.type = BFD_RELOC_UNUSED;
5128 }
5129
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded error: the instruction template it was found for, the
   error detail, and the link to the next record.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Template the error belongs to.  */
  aarch64_operand_error detail;		/* Kind, operand index, message.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* A singly-linked list of operand_error_record, tracked by both ends so
   spent records can be spliced onto the free list in O(1).  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled across assembly lines to avoid repeated
   allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5161
5162 /* Initialize the data structure that stores the operand mismatch
5163 information on assembling one line of the assembly code. */
5164 static void
5165 init_operand_error_report (void)
5166 {
5167 if (operand_error_report.head != NULL)
5168 {
5169 gas_assert (operand_error_report.tail != NULL);
5170 operand_error_report.tail->next = free_opnd_error_record_nodes;
5171 free_opnd_error_record_nodes = operand_error_report.head;
5172 operand_error_report.head = NULL;
5173 operand_error_report.tail = NULL;
5174 return;
5175 }
5176 gas_assert (operand_error_report.tail == NULL);
5177 }
5178
5179 /* Return TRUE if some operand error has been recorded during the
5180 parsing of the current assembly line using the opcode *OPCODE;
5181 otherwise return FALSE. */
5182 static inline bool
5183 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5184 {
5185 operand_error_record *record = operand_error_report.head;
5186 return record && record->opcode == opcode;
5187 }
5188
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* If a record for OPCODE already exists it is at the head (records are
     always inserted at the head, and one line processes one opcode at a
     time).  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Reuse a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Fill in (or overwrite) the retained record's detail.  */
  record->detail = new_record->detail;
}
5240
5241 static inline void
5242 record_operand_error_info (const aarch64_opcode *opcode,
5243 aarch64_operand_error *error_info)
5244 {
5245 operand_error_record record;
5246 record.opcode = opcode;
5247 record.detail = *error_info;
5248 add_operand_error_record (&record);
5249 }
5250
5251 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5252 error message *ERROR, for operand IDX (count from 0). */
5253
5254 static void
5255 record_operand_error (const aarch64_opcode *opcode, int idx,
5256 enum aarch64_operand_error_kind kind,
5257 const char* error)
5258 {
5259 aarch64_operand_error info;
5260 memset(&info, 0, sizeof (info));
5261 info.index = idx;
5262 info.kind = kind;
5263 info.error = error;
5264 info.non_fatal = false;
5265 record_operand_error_info (opcode, &info);
5266 }
5267
5268 static void
5269 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5270 enum aarch64_operand_error_kind kind,
5271 const char* error, const int *extra_data)
5272 {
5273 aarch64_operand_error info;
5274 info.index = idx;
5275 info.kind = kind;
5276 info.error = error;
5277 info.data[0].i = extra_data[0];
5278 info.data[1].i = extra_data[1];
5279 info.data[2].i = extra_data[2];
5280 info.non_fatal = false;
5281 record_operand_error_info (opcode, &info);
5282 }
5283
5284 static void
5285 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5286 const char* error, int lower_bound,
5287 int upper_bound)
5288 {
5289 int data[3] = {lower_bound, upper_bound, 0};
5290 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5291 error, data);
5292 }
5293
5294 /* Remove the operand error record for *OPCODE. */
5295 static void ATTRIBUTE_UNUSED
5296 remove_operand_error_record (const aarch64_opcode *opcode)
5297 {
5298 if (opcode_has_operand_error_p (opcode))
5299 {
5300 operand_error_record* record = operand_error_report.head;
5301 gas_assert (record != NULL && operand_error_report.tail != NULL);
5302 operand_error_report.head = record->next;
5303 record->next = free_opnd_error_record_nodes;
5304 free_opnd_error_record_nodes = record;
5305 if (operand_error_report.head == NULL)
5306 {
5307 gas_assert (operand_error_report.tail == record);
5308 operand_error_report.tail = NULL;
5309 }
5310 }
5311 }
5312
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many of the instruction's operand qualifiers agree
	 with this pattern.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Keep the first pattern with the highest match count.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5362
5363 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5364 corresponding operands in *INSTR. */
5365
5366 static inline void
5367 assign_qualifier_sequence (aarch64_inst *instr,
5368 const aarch64_opnd_qualifier_t *qualifiers)
5369 {
5370 int i = 0;
5371 int num_opnds = aarch64_num_of_operands (instr->opcode);
5372 gas_assert (num_opnds);
5373 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5374 instr->operands[i].qualifier = *qualifiers;
5375 }
5376
/* Callback used by aarch64_print_operand to apply STYLE to the
   disassembler output created from FMT and ARGS.  The STYLER object holds
   any required state.  Must return a pointer to a string (created from FMT
   and ARGS) that will continue to be valid until the complete disassembled
   instruction has been printed.

   We don't currently add any styling to the output of the disassembler as
   used within assembler error messages, and so STYLE is ignored here.  A
   new string is allocated on the obstack help within STYLER and returned
   to the caller.  */

static const char *aarch64_apply_style
	(struct aarch64_styler *styler,
	 enum disassembler_style style ATTRIBUTE_UNUSED,
	 const char *fmt, va_list args)
{
  int res;
  char *ptr;
  struct obstack *stack = (struct obstack *) styler->state;
  va_list ap;

  /* Calculate the required space.  ARGS must be copied because a va_list
     can only be traversed once; the copy is consumed here and the
     original by the second vsnprintf below.  */
  va_copy (ap, args);
  res = vsnprintf (NULL, 0, fmt, ap);
  va_end (ap);
  gas_assert (res >= 0);

  /* Allocate space on the obstack and format the result.  */
  ptr = (char *) obstack_alloc (stack, res + 1);
  res = vsnprintf (ptr, (res + 1), fmt, args);
  gas_assert (res >= 0);

  return ptr;
}
5411
/* Print operands for the diagnosis purpose.

   The textual form of each operand of OPCODE/OPNDS is appended to BUF
   (with strcat, so BUF must already hold a NUL-terminated prefix and be
   sized generously by the caller).  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Styled fragments are accumulated on CONTENT; see aarch64_apply_style.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  obstack_free (&content, NULL);
}
5463
/* Send to stderr a string as information, prefixed with the current
   file name and line number (when known) and "Info: ".  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  file = as_where (&line);
  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }

  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  putc ('\n', stderr);
}
5487
/* Output one operand error record.

   RECORD is the error to report and STR the raw assembly line it was
   found in (quoted back to the user in every message).  Non-fatal
   errors are issued as warnings, fatal ones as errors.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  /* IDX is -1 when the error is not tied to a particular operand.  */
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the inclusive lower/upper bounds.  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected register count.  */
      if (detail->data[0].i == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5683
5684 /* Process and output the error message about the operand mismatching.
5685
5686 When this function is called, the operand error information had
5687 been collected for an assembly line and there will be multiple
5688 errors in the case of multiple instruction templates; output the
5689 error message that most closely describes the problem.
5690
5691 The errors to be printed can be filtered on printing all errors
5692 or only non-fatal errors. This distinction has to be made because
5693 the error buffer may already be filled with fatal errors we don't want to
5694 print due to the different instruction templates. */
5695
5696 static void
5697 output_operand_error_report (char *str, bool non_fatal_only)
5698 {
5699 int largest_error_pos;
5700 const char *msg = NULL;
5701 enum aarch64_operand_error_kind kind;
5702 operand_error_record *curr;
5703 operand_error_record *head = operand_error_report.head;
5704 operand_error_record *record = NULL;
5705
5706 /* No error to report. */
5707 if (head == NULL)
5708 return;
5709
5710 gas_assert (head != NULL && operand_error_report.tail != NULL);
5711
5712 /* Only one error. */
5713 if (head == operand_error_report.tail)
5714 {
5715 /* If the only error is a non-fatal one and we don't want to print it,
5716 just exit. */
5717 if (!non_fatal_only || head->detail.non_fatal)
5718 {
5719 DEBUG_TRACE ("single opcode entry with error kind: %s",
5720 operand_mismatch_kind_names[head->detail.kind]);
5721 output_operand_error_record (head, str);
5722 }
5723 return;
5724 }
5725
5726 /* Find the error kind of the highest severity. */
5727 DEBUG_TRACE ("multiple opcode entries with error kind");
5728 kind = AARCH64_OPDE_NIL;
5729 for (curr = head; curr != NULL; curr = curr->next)
5730 {
5731 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
5732 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
5733 if (operand_error_higher_severity_p (curr->detail.kind, kind)
5734 && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
5735 kind = curr->detail.kind;
5736 }
5737
5738 gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
5739
5740 /* Pick up one of errors of KIND to report. */
5741 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
5742 for (curr = head; curr != NULL; curr = curr->next)
5743 {
5744 /* If we don't want to print non-fatal errors then don't consider them
5745 at all. */
5746 if (curr->detail.kind != kind
5747 || (non_fatal_only && !curr->detail.non_fatal))
5748 continue;
5749 /* If there are multiple errors, pick up the one with the highest
5750 mismatching operand index. In the case of multiple errors with
5751 the equally highest operand index, pick up the first one or the
5752 first one with non-NULL error message. */
5753 if (curr->detail.index > largest_error_pos
5754 || (curr->detail.index == largest_error_pos && msg == NULL
5755 && curr->detail.error != NULL))
5756 {
5757 largest_error_pos = curr->detail.index;
5758 record = curr;
5759 msg = record->detail.error;
5760 }
5761 }
5762
5763 /* The way errors are collected in the back-end is a bit non-intuitive. But
5764 essentially, because each operand template is tried recursively you may
5765 always have errors collected from the previous tried OPND. These are
5766 usually skipped if there is one successful match. However now with the
5767 non-fatal errors we have to ignore those previously collected hard errors
5768 when we're only interested in printing the non-fatal ones. This condition
5769 prevents us from printing errors that are not appropriate, since we did
5770 match a condition, but it also has warnings that it wants to print. */
5771 if (non_fatal_only && !record)
5772 return;
5773
5774 gas_assert (largest_error_pos != -2 && record != NULL);
5775 DEBUG_TRACE ("Pick up error kind %s to report",
5776 operand_mismatch_kind_names[record->detail.kind]);
5777
5778 /* Output. */
5779 output_operand_error_record (record, str);
5780 }
5781 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  unsigned int i;

  /* Emit the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (i * 8)) & 0xff;
}
5792
/* Read a little-endian 32-bit AArch64 instruction from BUF.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Fold the bytes in from the most significant end.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
5802
5803 static void
5804 output_inst (struct aarch64_inst *new_inst)
5805 {
5806 char *to = NULL;
5807
5808 to = frag_more (INSN_SIZE);
5809
5810 frag_now->tc_frag_data.recorded = 1;
5811
5812 put_aarch64_insn (to, inst.base.value);
5813
5814 if (inst.reloc.type != BFD_RELOC_UNUSED)
5815 {
5816 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
5817 INSN_SIZE, &inst.reloc.exp,
5818 inst.reloc.pc_rel,
5819 inst.reloc.type);
5820 DEBUG_TRACE ("Prepared relocation fix up");
5821 /* Don't check the addend value against the instruction size,
5822 that's the job of our code in md_apply_fix(). */
5823 fixp->fx_no_overflow = 1;
5824 if (new_inst != NULL)
5825 fixp->tc_fix_data.inst = new_inst;
5826 if (aarch64_gas_internal_fixup_p ())
5827 {
5828 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
5829 fixp->tc_fix_data.opnd = inst.reloc.opnd;
5830 fixp->fx_addnumber = inst.reloc.flags;
5831 }
5832 }
5833
5834 dwarf2_emit_insn (INSN_SIZE);
5835 }
5836
/* Link together opcodes of the same name.  */

struct templates
{
  /* One opcode entry (template) sharing the mnemonic.  */
  const aarch64_opcode *opcode;
  /* The next template with the same mnemonic, if any.  */
  struct templates *next;
};

typedef struct templates templates;
5846
5847 static templates *
5848 lookup_mnemonic (const char *start, int len)
5849 {
5850 templates *templ = NULL;
5851
5852 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5853 return templ;
5854 }
5855
5856 /* Subroutine of md_assemble, responsible for looking up the primary
5857 opcode from the mnemonic the user wrote. BASE points to the beginning
5858 of the mnemonic, DOT points to the first '.' within the mnemonic
5859 (if any) and END points to the end of the mnemonic. */
5860
5861 static templates *
5862 opcode_lookup (char *base, char *dot, char *end)
5863 {
5864 const aarch64_cond *cond;
5865 char condname[16];
5866 int len;
5867
5868 if (dot == end)
5869 return 0;
5870
5871 inst.cond = COND_ALWAYS;
5872
5873 /* Handle a possible condition. */
5874 if (dot)
5875 {
5876 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5877 if (!cond)
5878 return 0;
5879 inst.cond = cond->value;
5880 len = dot - base;
5881 }
5882 else
5883 len = end - base;
5884
5885 if (inst.cond == COND_ALWAYS)
5886 {
5887 /* Look for unaffixed mnemonic. */
5888 return lookup_mnemonic (base, len);
5889 }
5890 else if (len <= 13)
5891 {
5892 /* append ".c" to mnemonic if conditional */
5893 memcpy (condname, base, len);
5894 memcpy (condname + len, ".c", 2);
5895 base = condname;
5896 len += 2;
5897 return lookup_mnemonic (base, len);
5898 }
5899
5900 return NULL;
5901 }
5902
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* The opcode table records one encoded default value for the operand
     that may be omitted.  How it is interpreted depends on TYPE.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default value is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Register-with-element-index operands: the default value is the
       register number (the lane is separate).  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate-like operands (including enumerated SVE patterns and
       prefetch operations): the default value is the immediate itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* Scaled SVE pattern: default pattern with an implicit "MUL #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* Omitted exception immediate: drop any pending relocation.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Barrier/hint operands: the default value indexes the respective
       option table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6001
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   Translates the MOVW-family relocation recorded in inst.reloc.type
   into the implicit LSL shift amount of operand 1, and rejects
   combinations that are not allowed (certain types with MOVK, and
   G2/G3 groups with a 32-bit destination register).  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* Whether the destination is a 32-bit (W) register.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* These relocation types are not allowed with MOVK.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Map the relocation group (G0/G1/G2/G3) onto the implicit shift
     amount of the 16-bit immediate.  */
  switch (inst.reloc.type)
    {
    /* G0 group: bits [15:0], no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1 group: bits [31:16].  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2 group: bits [47:32]; only meaningful for 64-bit registers.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* G3 group: bits [63:48]; only meaningful for 64-bit registers.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6103
/* A primitive log calculator.  Return log2 of SIZE for the power-of-two
   sizes 1, 2, 4, 8 and 16; assert (and return (unsigned) -1) for any
   other input.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* -1 marks the sizes that are not powers of two.  Static so the
     table is not rebuilt on every call.  */
  static const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as SIZE > 16 before indexing: without the
     zero check, ls[size - 1] would read ls[-1], which is out of bounds.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
6119
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   Each pseudo type stands for a family of LDST relocations; the final
   type is selected by the access size implied by the qualifier of the
   address operand (operand 1).  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows correspond to the pseudo reloc types (in their reloc.c
     definition order); columns correspond to log2 of the access size
     in bytes (8/16/32/64/128 bits).  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  /* Only the pseudo types listed here may reach this function.  */
  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1 was written without a qualifier, derive the expected
     one from operand 0's qualifier via the opcode's qualifier lists.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS families above have no 128-bit variant (their last column
     is BFD_RELOC_AARCH64_NONE), so cap the access size accordingly.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6207
/* Validate the packed register list REGINFO: bits [1:0] hold the number
   of registers minus one, and each subsequent 5-bit field (starting at
   bit 2) holds a register number.  Successive registers must ascend
   modulo 32 by one, or by two when ACCEPT_ALTERNATE is non-zero.

   Return TRUE if the list is well-formed, FALSE otherwise.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t count = (reginfo & 0x3) + 1;
  uint32_t step = accept_alternate ? 2 : 1;
  uint32_t expected, i;

  /* Strip the count field; the first register needs no checking.  */
  reginfo >>= 2;
  expected = reginfo & 0x1f;

  for (i = 1; i < count; i++)
    {
      reginfo >>= 5;
      /* Advance the expected number, wrapping modulo 32.  */
      expected = (expected + step) & 0x1f;
      if ((reginfo & 0x1f) != expected)
	return false;
    }

  return true;
}
6238
6239 /* Generic instruction operand parser. This does no encoding and no
6240 semantic validation; it merely squirrels values away in the inst
6241 structure. Returns TRUE or FALSE depending on whether the
6242 specified grammar matched. */
6243
6244 static bool
6245 parse_operands (char *str, const aarch64_opcode *opcode)
6246 {
6247 int i;
6248 char *backtrack_pos = 0;
6249 const enum aarch64_opnd *operands = opcode->operands;
6250 aarch64_reg_type imm_reg_type;
6251
6252 clear_error ();
6253 skip_whitespace (str);
6254
6255 if (AARCH64_CPU_HAS_ANY_FEATURES (*opcode->avariant,
6256 AARCH64_FEATURE_SVE
6257 | AARCH64_FEATURE_SVE2))
6258 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6259 else
6260 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6261
6262 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6263 {
6264 int64_t val;
6265 const reg_entry *reg;
6266 int comma_skipped_p = 0;
6267 struct vector_type_el vectype;
6268 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6269 aarch64_opnd_info *info = &inst.base.operands[i];
6270 aarch64_reg_type reg_type;
6271
6272 DEBUG_TRACE ("parse operand %d", i);
6273
6274 /* Assign the operand code. */
6275 info->type = operands[i];
6276
6277 if (optional_operand_p (opcode, i))
6278 {
6279 /* Remember where we are in case we need to backtrack. */
6280 gas_assert (!backtrack_pos);
6281 backtrack_pos = str;
6282 }
6283
6284 /* Expect comma between operands; the backtrack mechanism will take
6285 care of cases of omitted optional operand. */
6286 if (i > 0 && ! skip_past_char (&str, ','))
6287 {
6288 set_syntax_error (_("comma expected between operands"));
6289 goto failure;
6290 }
6291 else
6292 comma_skipped_p = 1;
6293
6294 switch (operands[i])
6295 {
6296 case AARCH64_OPND_Rd:
6297 case AARCH64_OPND_Rn:
6298 case AARCH64_OPND_Rm:
6299 case AARCH64_OPND_Rt:
6300 case AARCH64_OPND_Rt2:
6301 case AARCH64_OPND_Rs:
6302 case AARCH64_OPND_Ra:
6303 case AARCH64_OPND_Rt_LS64:
6304 case AARCH64_OPND_Rt_SYS:
6305 case AARCH64_OPND_PAIRREG:
6306 case AARCH64_OPND_SVE_Rm:
6307 po_int_reg_or_fail (REG_TYPE_R_Z);
6308
6309 /* In LS64 load/store instructions Rt register number must be even
6310 and <=22. */
6311 if (operands[i] == AARCH64_OPND_Rt_LS64)
6312 {
6313 /* We've already checked if this is valid register.
6314 This will check if register number (Rt) is not undefined for LS64
6315 instructions:
6316 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6317 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6318 {
6319 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6320 goto failure;
6321 }
6322 }
6323 break;
6324
6325 case AARCH64_OPND_Rd_SP:
6326 case AARCH64_OPND_Rn_SP:
6327 case AARCH64_OPND_Rt_SP:
6328 case AARCH64_OPND_SVE_Rn_SP:
6329 case AARCH64_OPND_Rm_SP:
6330 po_int_reg_or_fail (REG_TYPE_R_SP);
6331 break;
6332
6333 case AARCH64_OPND_Rm_EXT:
6334 case AARCH64_OPND_Rm_SFT:
6335 po_misc_or_fail (parse_shifter_operand
6336 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6337 ? SHIFTED_ARITH_IMM
6338 : SHIFTED_LOGIC_IMM)));
6339 if (!info->shifter.operator_present)
6340 {
6341 /* Default to LSL if not present. Libopcodes prefers shifter
6342 kind to be explicit. */
6343 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6344 info->shifter.kind = AARCH64_MOD_LSL;
6345 /* For Rm_EXT, libopcodes will carry out further check on whether
6346 or not stack pointer is used in the instruction (Recall that
6347 "the extend operator is not optional unless at least one of
6348 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6349 }
6350 break;
6351
6352 case AARCH64_OPND_Fd:
6353 case AARCH64_OPND_Fn:
6354 case AARCH64_OPND_Fm:
6355 case AARCH64_OPND_Fa:
6356 case AARCH64_OPND_Ft:
6357 case AARCH64_OPND_Ft2:
6358 case AARCH64_OPND_Sd:
6359 case AARCH64_OPND_Sn:
6360 case AARCH64_OPND_Sm:
6361 case AARCH64_OPND_SVE_VZn:
6362 case AARCH64_OPND_SVE_Vd:
6363 case AARCH64_OPND_SVE_Vm:
6364 case AARCH64_OPND_SVE_Vn:
6365 reg = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, NULL);
6366 if (!reg)
6367 {
6368 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6369 goto failure;
6370 }
6371 gas_assert (reg->type >= REG_TYPE_FP_B
6372 && reg->type <= REG_TYPE_FP_Q);
6373
6374 info->reg.regno = reg->number;
6375 info->qualifier = AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);
6376 break;
6377
6378 case AARCH64_OPND_SVE_Pd:
6379 case AARCH64_OPND_SVE_Pg3:
6380 case AARCH64_OPND_SVE_Pg4_5:
6381 case AARCH64_OPND_SVE_Pg4_10:
6382 case AARCH64_OPND_SVE_Pg4_16:
6383 case AARCH64_OPND_SVE_Pm:
6384 case AARCH64_OPND_SVE_Pn:
6385 case AARCH64_OPND_SVE_Pt:
6386 case AARCH64_OPND_SME_Pm:
6387 reg_type = REG_TYPE_PN;
6388 goto vector_reg;
6389
6390 case AARCH64_OPND_SVE_Za_5:
6391 case AARCH64_OPND_SVE_Za_16:
6392 case AARCH64_OPND_SVE_Zd:
6393 case AARCH64_OPND_SVE_Zm_5:
6394 case AARCH64_OPND_SVE_Zm_16:
6395 case AARCH64_OPND_SVE_Zn:
6396 case AARCH64_OPND_SVE_Zt:
6397 reg_type = REG_TYPE_ZN;
6398 goto vector_reg;
6399
6400 case AARCH64_OPND_Va:
6401 case AARCH64_OPND_Vd:
6402 case AARCH64_OPND_Vn:
6403 case AARCH64_OPND_Vm:
6404 reg_type = REG_TYPE_VN;
6405 vector_reg:
6406 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6407 if (!reg)
6408 {
6409 first_error (_(get_reg_expected_msg (reg_type)));
6410 goto failure;
6411 }
6412 if (vectype.defined & NTA_HASINDEX)
6413 goto failure;
6414
6415 info->reg.regno = reg->number;
6416 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6417 && vectype.type == NT_invtype)
6418 /* Unqualified Pn and Zn registers are allowed in certain
6419 contexts. Rely on F_STRICT qualifier checking to catch
6420 invalid uses. */
6421 info->qualifier = AARCH64_OPND_QLF_NIL;
6422 else
6423 {
6424 info->qualifier = vectype_to_qualifier (&vectype);
6425 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6426 goto failure;
6427 }
6428 break;
6429
6430 case AARCH64_OPND_VdD1:
6431 case AARCH64_OPND_VnD1:
6432 reg = aarch64_reg_parse (&str, REG_TYPE_VN, &vectype);
6433 if (!reg)
6434 {
6435 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6436 goto failure;
6437 }
6438 if (vectype.type != NT_d || vectype.index != 1)
6439 {
6440 set_fatal_syntax_error
6441 (_("the top half of a 128-bit FP/SIMD register is expected"));
6442 goto failure;
6443 }
6444 info->reg.regno = reg->number;
6445 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6446 here; it is correct for the purpose of encoding/decoding since
6447 only the register number is explicitly encoded in the related
6448 instructions, although this appears a bit hacky. */
6449 info->qualifier = AARCH64_OPND_QLF_S_D;
6450 break;
6451
6452 case AARCH64_OPND_SVE_Zm3_INDEX:
6453 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6454 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6455 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6456 case AARCH64_OPND_SVE_Zm4_INDEX:
6457 case AARCH64_OPND_SVE_Zn_INDEX:
6458 reg_type = REG_TYPE_ZN;
6459 goto vector_reg_index;
6460
6461 case AARCH64_OPND_Ed:
6462 case AARCH64_OPND_En:
6463 case AARCH64_OPND_Em:
6464 case AARCH64_OPND_Em16:
6465 case AARCH64_OPND_SM3_IMM2:
6466 reg_type = REG_TYPE_VN;
6467 vector_reg_index:
6468 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6469 if (!reg)
6470 {
6471 first_error (_(get_reg_expected_msg (reg_type)));
6472 goto failure;
6473 }
6474 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6475 goto failure;
6476
6477 info->reglane.regno = reg->number;
6478 info->reglane.index = vectype.index;
6479 info->qualifier = vectype_to_qualifier (&vectype);
6480 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6481 goto failure;
6482 break;
6483
6484 case AARCH64_OPND_SVE_ZnxN:
6485 case AARCH64_OPND_SVE_ZtxN:
6486 reg_type = REG_TYPE_ZN;
6487 goto vector_reg_list;
6488
6489 case AARCH64_OPND_LVn:
6490 case AARCH64_OPND_LVt:
6491 case AARCH64_OPND_LVt_AL:
6492 case AARCH64_OPND_LEt:
6493 reg_type = REG_TYPE_VN;
6494 vector_reg_list:
6495 if (reg_type == REG_TYPE_ZN
6496 && get_opcode_dependent_value (opcode) == 1
6497 && *str != '{')
6498 {
6499 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6500 if (!reg)
6501 {
6502 first_error (_(get_reg_expected_msg (reg_type)));
6503 goto failure;
6504 }
6505 info->reglist.first_regno = reg->number;
6506 info->reglist.num_regs = 1;
6507 }
6508 else
6509 {
6510 val = parse_vector_reg_list (&str, reg_type, &vectype);
6511 if (val == PARSE_FAIL)
6512 goto failure;
6513
6514 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6515 {
6516 set_fatal_syntax_error (_("invalid register list"));
6517 goto failure;
6518 }
6519
6520 if (vectype.width != 0 && *str != ',')
6521 {
6522 set_fatal_syntax_error
6523 (_("expected element type rather than vector type"));
6524 goto failure;
6525 }
6526
6527 info->reglist.first_regno = (val >> 2) & 0x1f;
6528 info->reglist.num_regs = (val & 0x3) + 1;
6529 }
6530 if (operands[i] == AARCH64_OPND_LEt)
6531 {
6532 if (!(vectype.defined & NTA_HASINDEX))
6533 goto failure;
6534 info->reglist.has_index = 1;
6535 info->reglist.index = vectype.index;
6536 }
6537 else
6538 {
6539 if (vectype.defined & NTA_HASINDEX)
6540 goto failure;
6541 if (!(vectype.defined & NTA_HASTYPE))
6542 {
6543 if (reg_type == REG_TYPE_ZN)
6544 set_fatal_syntax_error (_("missing type suffix"));
6545 goto failure;
6546 }
6547 }
6548 info->qualifier = vectype_to_qualifier (&vectype);
6549 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6550 goto failure;
6551 break;
6552
6553 case AARCH64_OPND_CRn:
6554 case AARCH64_OPND_CRm:
6555 {
6556 char prefix = *(str++);
6557 if (prefix != 'c' && prefix != 'C')
6558 goto failure;
6559
6560 po_imm_nc_or_fail ();
6561 if (val > 15)
6562 {
6563 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6564 goto failure;
6565 }
6566 info->qualifier = AARCH64_OPND_QLF_CR;
6567 info->imm.value = val;
6568 break;
6569 }
6570
6571 case AARCH64_OPND_SHLL_IMM:
6572 case AARCH64_OPND_IMM_VLSR:
6573 po_imm_or_fail (1, 64);
6574 info->imm.value = val;
6575 break;
6576
6577 case AARCH64_OPND_CCMP_IMM:
6578 case AARCH64_OPND_SIMM5:
6579 case AARCH64_OPND_FBITS:
6580 case AARCH64_OPND_TME_UIMM16:
6581 case AARCH64_OPND_UIMM4:
6582 case AARCH64_OPND_UIMM4_ADDG:
6583 case AARCH64_OPND_UIMM10:
6584 case AARCH64_OPND_UIMM3_OP1:
6585 case AARCH64_OPND_UIMM3_OP2:
6586 case AARCH64_OPND_IMM_VLSL:
6587 case AARCH64_OPND_IMM:
6588 case AARCH64_OPND_IMM_2:
6589 case AARCH64_OPND_WIDTH:
6590 case AARCH64_OPND_SVE_INV_LIMM:
6591 case AARCH64_OPND_SVE_LIMM:
6592 case AARCH64_OPND_SVE_LIMM_MOV:
6593 case AARCH64_OPND_SVE_SHLIMM_PRED:
6594 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6595 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6596 case AARCH64_OPND_SVE_SHRIMM_PRED:
6597 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6598 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6599 case AARCH64_OPND_SVE_SIMM5:
6600 case AARCH64_OPND_SVE_SIMM5B:
6601 case AARCH64_OPND_SVE_SIMM6:
6602 case AARCH64_OPND_SVE_SIMM8:
6603 case AARCH64_OPND_SVE_UIMM3:
6604 case AARCH64_OPND_SVE_UIMM7:
6605 case AARCH64_OPND_SVE_UIMM8:
6606 case AARCH64_OPND_SVE_UIMM8_53:
6607 case AARCH64_OPND_IMM_ROT1:
6608 case AARCH64_OPND_IMM_ROT2:
6609 case AARCH64_OPND_IMM_ROT3:
6610 case AARCH64_OPND_SVE_IMM_ROT1:
6611 case AARCH64_OPND_SVE_IMM_ROT2:
6612 case AARCH64_OPND_SVE_IMM_ROT3:
6613 case AARCH64_OPND_CSSC_SIMM8:
6614 case AARCH64_OPND_CSSC_UIMM8:
6615 po_imm_nc_or_fail ();
6616 info->imm.value = val;
6617 break;
6618
6619 case AARCH64_OPND_SVE_AIMM:
6620 case AARCH64_OPND_SVE_ASIMM:
6621 po_imm_nc_or_fail ();
6622 info->imm.value = val;
6623 skip_whitespace (str);
6624 if (skip_past_comma (&str))
6625 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6626 else
6627 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6628 break;
6629
6630 case AARCH64_OPND_SVE_PATTERN:
6631 po_enum_or_fail (aarch64_sve_pattern_array);
6632 info->imm.value = val;
6633 break;
6634
6635 case AARCH64_OPND_SVE_PATTERN_SCALED:
6636 po_enum_or_fail (aarch64_sve_pattern_array);
6637 info->imm.value = val;
6638 if (skip_past_comma (&str)
6639 && !parse_shift (&str, info, SHIFTED_MUL))
6640 goto failure;
6641 if (!info->shifter.operator_present)
6642 {
6643 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6644 info->shifter.kind = AARCH64_MOD_MUL;
6645 info->shifter.amount = 1;
6646 }
6647 break;
6648
6649 case AARCH64_OPND_SVE_PRFOP:
6650 po_enum_or_fail (aarch64_sve_prfop_array);
6651 info->imm.value = val;
6652 break;
6653
6654 case AARCH64_OPND_UIMM7:
6655 po_imm_or_fail (0, 127);
6656 info->imm.value = val;
6657 break;
6658
6659 case AARCH64_OPND_IDX:
6660 case AARCH64_OPND_MASK:
6661 case AARCH64_OPND_BIT_NUM:
6662 case AARCH64_OPND_IMMR:
6663 case AARCH64_OPND_IMMS:
6664 po_imm_or_fail (0, 63);
6665 info->imm.value = val;
6666 break;
6667
6668 case AARCH64_OPND_IMM0:
6669 po_imm_nc_or_fail ();
6670 if (val != 0)
6671 {
6672 set_fatal_syntax_error (_("immediate zero expected"));
6673 goto failure;
6674 }
6675 info->imm.value = 0;
6676 break;
6677
6678 case AARCH64_OPND_FPIMM0:
6679 {
6680 int qfloat;
6681 bool res1 = false, res2 = false;
6682 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6683 it is probably not worth the effort to support it. */
6684 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6685 imm_reg_type))
6686 && (error_p ()
6687 || !(res2 = parse_constant_immediate (&str, &val,
6688 imm_reg_type))))
6689 goto failure;
6690 if ((res1 && qfloat == 0) || (res2 && val == 0))
6691 {
6692 info->imm.value = 0;
6693 info->imm.is_fp = 1;
6694 break;
6695 }
6696 set_fatal_syntax_error (_("immediate zero expected"));
6697 goto failure;
6698 }
6699
6700 case AARCH64_OPND_IMM_MOV:
6701 {
6702 char *saved = str;
6703 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6704 reg_name_p (str, REG_TYPE_VN))
6705 goto failure;
6706 str = saved;
6707 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6708 GE_OPT_PREFIX, REJECT_ABSENT));
6709 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6710 later. fix_mov_imm_insn will try to determine a machine
6711 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6712 message if the immediate cannot be moved by a single
6713 instruction. */
6714 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6715 inst.base.operands[i].skip = 1;
6716 }
6717 break;
6718
6719 case AARCH64_OPND_SIMD_IMM:
6720 case AARCH64_OPND_SIMD_IMM_SFT:
6721 if (! parse_big_immediate (&str, &val, imm_reg_type))
6722 goto failure;
6723 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6724 /* addr_off_p */ 0,
6725 /* need_libopcodes_p */ 1,
6726 /* skip_p */ 1);
6727 /* Parse shift.
6728 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6729 shift, we don't check it here; we leave the checking to
6730 the libopcodes (operand_general_constraint_met_p). By
6731 doing this, we achieve better diagnostics. */
6732 if (skip_past_comma (&str)
6733 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6734 goto failure;
6735 if (!info->shifter.operator_present
6736 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6737 {
6738 /* Default to LSL if not present. Libopcodes prefers shifter
6739 kind to be explicit. */
6740 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6741 info->shifter.kind = AARCH64_MOD_LSL;
6742 }
6743 break;
6744
6745 case AARCH64_OPND_FPIMM:
6746 case AARCH64_OPND_SIMD_FPIMM:
6747 case AARCH64_OPND_SVE_FPIMM8:
6748 {
6749 int qfloat;
6750 bool dp_p;
6751
6752 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6753 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6754 || !aarch64_imm_float_p (qfloat))
6755 {
6756 if (!error_p ())
6757 set_fatal_syntax_error (_("invalid floating-point"
6758 " constant"));
6759 goto failure;
6760 }
6761 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6762 inst.base.operands[i].imm.is_fp = 1;
6763 }
6764 break;
6765
6766 case AARCH64_OPND_SVE_I1_HALF_ONE:
6767 case AARCH64_OPND_SVE_I1_HALF_TWO:
6768 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6769 {
6770 int qfloat;
6771 bool dp_p;
6772
6773 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6774 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6775 {
6776 if (!error_p ())
6777 set_fatal_syntax_error (_("invalid floating-point"
6778 " constant"));
6779 goto failure;
6780 }
6781 inst.base.operands[i].imm.value = qfloat;
6782 inst.base.operands[i].imm.is_fp = 1;
6783 }
6784 break;
6785
6786 case AARCH64_OPND_LIMM:
6787 po_misc_or_fail (parse_shifter_operand (&str, info,
6788 SHIFTED_LOGIC_IMM));
6789 if (info->shifter.operator_present)
6790 {
6791 set_fatal_syntax_error
6792 (_("shift not allowed for bitmask immediate"));
6793 goto failure;
6794 }
6795 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6796 /* addr_off_p */ 0,
6797 /* need_libopcodes_p */ 1,
6798 /* skip_p */ 1);
6799 break;
6800
6801 case AARCH64_OPND_AIMM:
6802 if (opcode->op == OP_ADD)
6803 /* ADD may have relocation types. */
6804 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6805 SHIFTED_ARITH_IMM));
6806 else
6807 po_misc_or_fail (parse_shifter_operand (&str, info,
6808 SHIFTED_ARITH_IMM));
6809 switch (inst.reloc.type)
6810 {
6811 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6812 info->shifter.amount = 12;
6813 break;
6814 case BFD_RELOC_UNUSED:
6815 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6816 if (info->shifter.kind != AARCH64_MOD_NONE)
6817 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6818 inst.reloc.pc_rel = 0;
6819 break;
6820 default:
6821 break;
6822 }
6823 info->imm.value = 0;
6824 if (!info->shifter.operator_present)
6825 {
6826 /* Default to LSL if not present. Libopcodes prefers shifter
6827 kind to be explicit. */
6828 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6829 info->shifter.kind = AARCH64_MOD_LSL;
6830 }
6831 break;
6832
6833 case AARCH64_OPND_HALF:
6834 {
6835 /* #<imm16> or relocation. */
6836 int internal_fixup_p;
6837 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6838 if (internal_fixup_p)
6839 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6840 skip_whitespace (str);
6841 if (skip_past_comma (&str))
6842 {
6843 /* {, LSL #<shift>} */
6844 if (! aarch64_gas_internal_fixup_p ())
6845 {
6846 set_fatal_syntax_error (_("can't mix relocation modifier "
6847 "with explicit shift"));
6848 goto failure;
6849 }
6850 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6851 }
6852 else
6853 inst.base.operands[i].shifter.amount = 0;
6854 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6855 inst.base.operands[i].imm.value = 0;
6856 if (! process_movw_reloc_info ())
6857 goto failure;
6858 }
6859 break;
6860
6861 case AARCH64_OPND_EXCEPTION:
6862 case AARCH64_OPND_UNDEFINED:
6863 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6864 imm_reg_type));
6865 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6866 /* addr_off_p */ 0,
6867 /* need_libopcodes_p */ 0,
6868 /* skip_p */ 1);
6869 break;
6870
6871 case AARCH64_OPND_NZCV:
6872 {
6873 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6874 if (nzcv != NULL)
6875 {
6876 str += 4;
6877 info->imm.value = nzcv->value;
6878 break;
6879 }
6880 po_imm_or_fail (0, 15);
6881 info->imm.value = val;
6882 }
6883 break;
6884
6885 case AARCH64_OPND_COND:
6886 case AARCH64_OPND_COND1:
6887 {
6888 char *start = str;
6889 do
6890 str++;
6891 while (ISALPHA (*str));
6892 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6893 if (info->cond == NULL)
6894 {
6895 set_syntax_error (_("invalid condition"));
6896 goto failure;
6897 }
6898 else if (operands[i] == AARCH64_OPND_COND1
6899 && (info->cond->value & 0xe) == 0xe)
6900 {
6901 /* Do not allow AL or NV. */
6902 set_default_error ();
6903 goto failure;
6904 }
6905 }
6906 break;
6907
6908 case AARCH64_OPND_ADDR_ADRP:
6909 po_misc_or_fail (parse_adrp (&str));
6910 /* Clear the value as operand needs to be relocated. */
6911 info->imm.value = 0;
6912 break;
6913
6914 case AARCH64_OPND_ADDR_PCREL14:
6915 case AARCH64_OPND_ADDR_PCREL19:
6916 case AARCH64_OPND_ADDR_PCREL21:
6917 case AARCH64_OPND_ADDR_PCREL26:
6918 po_misc_or_fail (parse_address (&str, info));
6919 if (!info->addr.pcrel)
6920 {
6921 set_syntax_error (_("invalid pc-relative address"));
6922 goto failure;
6923 }
6924 if (inst.gen_lit_pool
6925 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6926 {
6927 /* Only permit "=value" in the literal load instructions.
6928 The literal will be generated by programmer_friendly_fixup. */
6929 set_syntax_error (_("invalid use of \"=immediate\""));
6930 goto failure;
6931 }
6932 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6933 {
6934 set_syntax_error (_("unrecognized relocation suffix"));
6935 goto failure;
6936 }
6937 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6938 {
6939 info->imm.value = inst.reloc.exp.X_add_number;
6940 inst.reloc.type = BFD_RELOC_UNUSED;
6941 }
6942 else
6943 {
6944 info->imm.value = 0;
6945 if (inst.reloc.type == BFD_RELOC_UNUSED)
6946 switch (opcode->iclass)
6947 {
6948 case compbranch:
6949 case condbranch:
6950 /* e.g. CBZ or B.COND */
6951 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6952 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6953 break;
6954 case testbranch:
6955 /* e.g. TBZ */
6956 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6957 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6958 break;
6959 case branch_imm:
6960 /* e.g. B or BL */
6961 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6962 inst.reloc.type =
6963 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6964 : BFD_RELOC_AARCH64_JUMP26;
6965 break;
6966 case loadlit:
6967 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6968 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6969 break;
6970 case pcreladdr:
6971 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6972 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6973 break;
6974 default:
6975 gas_assert (0);
6976 abort ();
6977 }
6978 inst.reloc.pc_rel = 1;
6979 }
6980 break;
6981
6982 case AARCH64_OPND_ADDR_SIMPLE:
6983 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6984 {
6985 /* [<Xn|SP>{, #<simm>}] */
6986 char *start = str;
6987 /* First use the normal address-parsing routines, to get
6988 the usual syntax errors. */
6989 po_misc_or_fail (parse_address (&str, info));
6990 if (info->addr.pcrel || info->addr.offset.is_reg
6991 || !info->addr.preind || info->addr.postind
6992 || info->addr.writeback)
6993 {
6994 set_syntax_error (_("invalid addressing mode"));
6995 goto failure;
6996 }
6997
6998 /* Then retry, matching the specific syntax of these addresses. */
6999 str = start;
7000 po_char_or_fail ('[');
7001 po_reg_or_fail (REG_TYPE_R64_SP);
7002 /* Accept optional ", #0". */
7003 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7004 && skip_past_char (&str, ','))
7005 {
7006 skip_past_char (&str, '#');
7007 if (! skip_past_char (&str, '0'))
7008 {
7009 set_fatal_syntax_error
7010 (_("the optional immediate offset can only be 0"));
7011 goto failure;
7012 }
7013 }
7014 po_char_or_fail (']');
7015 break;
7016 }
7017
7018 case AARCH64_OPND_ADDR_REGOFF:
7019 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7020 po_misc_or_fail (parse_address (&str, info));
7021 regoff_addr:
7022 if (info->addr.pcrel || !info->addr.offset.is_reg
7023 || !info->addr.preind || info->addr.postind
7024 || info->addr.writeback)
7025 {
7026 set_syntax_error (_("invalid addressing mode"));
7027 goto failure;
7028 }
7029 if (!info->shifter.operator_present)
7030 {
7031 /* Default to LSL if not present. Libopcodes prefers shifter
7032 kind to be explicit. */
7033 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7034 info->shifter.kind = AARCH64_MOD_LSL;
7035 }
7036 /* Qualifier to be deduced by libopcodes. */
7037 break;
7038
7039 case AARCH64_OPND_ADDR_SIMM7:
7040 po_misc_or_fail (parse_address (&str, info));
7041 if (info->addr.pcrel || info->addr.offset.is_reg
7042 || (!info->addr.preind && !info->addr.postind))
7043 {
7044 set_syntax_error (_("invalid addressing mode"));
7045 goto failure;
7046 }
7047 if (inst.reloc.type != BFD_RELOC_UNUSED)
7048 {
7049 set_syntax_error (_("relocation not allowed"));
7050 goto failure;
7051 }
7052 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7053 /* addr_off_p */ 1,
7054 /* need_libopcodes_p */ 1,
7055 /* skip_p */ 0);
7056 break;
7057
7058 case AARCH64_OPND_ADDR_SIMM9:
7059 case AARCH64_OPND_ADDR_SIMM9_2:
7060 case AARCH64_OPND_ADDR_SIMM11:
7061 case AARCH64_OPND_ADDR_SIMM13:
7062 po_misc_or_fail (parse_address (&str, info));
7063 if (info->addr.pcrel || info->addr.offset.is_reg
7064 || (!info->addr.preind && !info->addr.postind)
7065 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7066 && info->addr.writeback))
7067 {
7068 set_syntax_error (_("invalid addressing mode"));
7069 goto failure;
7070 }
7071 if (inst.reloc.type != BFD_RELOC_UNUSED)
7072 {
7073 set_syntax_error (_("relocation not allowed"));
7074 goto failure;
7075 }
7076 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7077 /* addr_off_p */ 1,
7078 /* need_libopcodes_p */ 1,
7079 /* skip_p */ 0);
7080 break;
7081
7082 case AARCH64_OPND_ADDR_SIMM10:
7083 case AARCH64_OPND_ADDR_OFFSET:
7084 po_misc_or_fail (parse_address (&str, info));
7085 if (info->addr.pcrel || info->addr.offset.is_reg
7086 || !info->addr.preind || info->addr.postind)
7087 {
7088 set_syntax_error (_("invalid addressing mode"));
7089 goto failure;
7090 }
7091 if (inst.reloc.type != BFD_RELOC_UNUSED)
7092 {
7093 set_syntax_error (_("relocation not allowed"));
7094 goto failure;
7095 }
7096 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7097 /* addr_off_p */ 1,
7098 /* need_libopcodes_p */ 1,
7099 /* skip_p */ 0);
7100 break;
7101
7102 case AARCH64_OPND_ADDR_UIMM12:
7103 po_misc_or_fail (parse_address (&str, info));
7104 if (info->addr.pcrel || info->addr.offset.is_reg
7105 || !info->addr.preind || info->addr.writeback)
7106 {
7107 set_syntax_error (_("invalid addressing mode"));
7108 goto failure;
7109 }
7110 if (inst.reloc.type == BFD_RELOC_UNUSED)
7111 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7112 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7113 || (inst.reloc.type
7114 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7115 || (inst.reloc.type
7116 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7117 || (inst.reloc.type
7118 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7119 || (inst.reloc.type
7120 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7121 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7122 /* Leave qualifier to be determined by libopcodes. */
7123 break;
7124
7125 case AARCH64_OPND_SIMD_ADDR_POST:
7126 /* [<Xn|SP>], <Xm|#<amount>> */
7127 po_misc_or_fail (parse_address (&str, info));
7128 if (!info->addr.postind || !info->addr.writeback)
7129 {
7130 set_syntax_error (_("invalid addressing mode"));
7131 goto failure;
7132 }
7133 if (!info->addr.offset.is_reg)
7134 {
7135 if (inst.reloc.exp.X_op == O_constant)
7136 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7137 else
7138 {
7139 set_fatal_syntax_error
7140 (_("writeback value must be an immediate constant"));
7141 goto failure;
7142 }
7143 }
7144 /* No qualifier. */
7145 break;
7146
7147 case AARCH64_OPND_SME_SM_ZA:
7148 /* { SM | ZA } */
7149 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7150 {
7151 set_syntax_error (_("unknown or missing PSTATE field name"));
7152 goto failure;
7153 }
7154 info->reg.regno = val;
7155 break;
7156
7157 case AARCH64_OPND_SME_PnT_Wm_imm:
7158 /* <Pn>.<T>[<Wm>, #<imm>] */
7159 {
7160 int index_base_reg;
7161 int imm;
7162 val = parse_sme_pred_reg_with_index (&str,
7163 &index_base_reg,
7164 &imm,
7165 &qualifier);
7166 if (val == PARSE_FAIL)
7167 goto failure;
7168
7169 info->za_tile_vector.regno = val;
7170 info->za_tile_vector.index.regno = index_base_reg;
7171 info->za_tile_vector.index.imm = imm;
7172 info->qualifier = qualifier;
7173 break;
7174 }
7175
7176 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7177 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7178 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7179 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7180 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7181 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7182 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7183 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7184 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7185 case AARCH64_OPND_SVE_ADDR_RI_U6:
7186 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7187 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7188 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7189 /* [X<n>{, #imm, MUL VL}]
7190 [X<n>{, #imm}]
7191 but recognizing SVE registers. */
7192 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7193 &offset_qualifier));
7194 if (base_qualifier != AARCH64_OPND_QLF_X)
7195 {
7196 set_syntax_error (_("invalid addressing mode"));
7197 goto failure;
7198 }
7199 sve_regimm:
7200 if (info->addr.pcrel || info->addr.offset.is_reg
7201 || !info->addr.preind || info->addr.writeback)
7202 {
7203 set_syntax_error (_("invalid addressing mode"));
7204 goto failure;
7205 }
7206 if (inst.reloc.type != BFD_RELOC_UNUSED
7207 || inst.reloc.exp.X_op != O_constant)
7208 {
7209 /* Make sure this has priority over
7210 "invalid addressing mode". */
7211 set_fatal_syntax_error (_("constant offset required"));
7212 goto failure;
7213 }
7214 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7215 break;
7216
7217 case AARCH64_OPND_SVE_ADDR_R:
7218 /* [<Xn|SP>{, <R><m>}]
7219 but recognizing SVE registers. */
7220 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7221 &offset_qualifier));
7222 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7223 {
7224 offset_qualifier = AARCH64_OPND_QLF_X;
7225 info->addr.offset.is_reg = 1;
7226 info->addr.offset.regno = 31;
7227 }
7228 else if (base_qualifier != AARCH64_OPND_QLF_X
7229 || offset_qualifier != AARCH64_OPND_QLF_X)
7230 {
7231 set_syntax_error (_("invalid addressing mode"));
7232 goto failure;
7233 }
7234 goto regoff_addr;
7235
7236 case AARCH64_OPND_SVE_ADDR_RR:
7237 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7238 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7239 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7240 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7241 case AARCH64_OPND_SVE_ADDR_RX:
7242 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7243 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7244 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7245 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7246 but recognizing SVE registers. */
7247 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7248 &offset_qualifier));
7249 if (base_qualifier != AARCH64_OPND_QLF_X
7250 || offset_qualifier != AARCH64_OPND_QLF_X)
7251 {
7252 set_syntax_error (_("invalid addressing mode"));
7253 goto failure;
7254 }
7255 goto regoff_addr;
7256
7257 case AARCH64_OPND_SVE_ADDR_RZ:
7258 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7259 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7260 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7261 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7262 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7263 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7264 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7265 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7266 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7267 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7268 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7269 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7270 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7271 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7272 &offset_qualifier));
7273 if (base_qualifier != AARCH64_OPND_QLF_X
7274 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7275 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7276 {
7277 set_syntax_error (_("invalid addressing mode"));
7278 goto failure;
7279 }
7280 info->qualifier = offset_qualifier;
7281 goto regoff_addr;
7282
7283 case AARCH64_OPND_SVE_ADDR_ZX:
7284 /* [Zn.<T>{, <Xm>}]. */
7285 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7286 &offset_qualifier));
7287 /* Things to check:
7288 base_qualifier either S_S or S_D
7289 offset_qualifier must be X
7290 */
7291 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7292 && base_qualifier != AARCH64_OPND_QLF_S_D)
7293 || offset_qualifier != AARCH64_OPND_QLF_X)
7294 {
7295 set_syntax_error (_("invalid addressing mode"));
7296 goto failure;
7297 }
7298 info->qualifier = base_qualifier;
7299 if (!info->addr.offset.is_reg || info->addr.pcrel
7300 || !info->addr.preind || info->addr.writeback
7301 || info->shifter.operator_present != 0)
7302 {
7303 set_syntax_error (_("invalid addressing mode"));
7304 goto failure;
7305 }
7306 info->shifter.kind = AARCH64_MOD_LSL;
7307 break;
7308
7309
7310 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7311 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7312 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7313 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7314 /* [Z<n>.<T>{, #imm}] */
7315 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7316 &offset_qualifier));
7317 if (base_qualifier != AARCH64_OPND_QLF_S_S
7318 && base_qualifier != AARCH64_OPND_QLF_S_D)
7319 {
7320 set_syntax_error (_("invalid addressing mode"));
7321 goto failure;
7322 }
7323 info->qualifier = base_qualifier;
7324 goto sve_regimm;
7325
7326 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7327 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7328 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7329 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7330 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7331
7332 We don't reject:
7333
7334 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7335
7336 here since we get better error messages by leaving it to
7337 the qualifier checking routines. */
7338 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7339 &offset_qualifier));
7340 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7341 && base_qualifier != AARCH64_OPND_QLF_S_D)
7342 || offset_qualifier != base_qualifier)
7343 {
7344 set_syntax_error (_("invalid addressing mode"));
7345 goto failure;
7346 }
7347 info->qualifier = base_qualifier;
7348 goto regoff_addr;
7349
7350 case AARCH64_OPND_SYSREG:
7351 {
7352 uint32_t sysreg_flags;
7353 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7354 &sysreg_flags)) == PARSE_FAIL)
7355 {
7356 set_syntax_error (_("unknown or missing system register name"));
7357 goto failure;
7358 }
7359 inst.base.operands[i].sysreg.value = val;
7360 inst.base.operands[i].sysreg.flags = sysreg_flags;
7361 break;
7362 }
7363
7364 case AARCH64_OPND_PSTATEFIELD:
7365 {
7366 uint32_t sysreg_flags;
7367 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7368 &sysreg_flags)) == PARSE_FAIL)
7369 {
7370 set_syntax_error (_("unknown or missing PSTATE field name"));
7371 goto failure;
7372 }
7373 inst.base.operands[i].pstatefield = val;
7374 inst.base.operands[i].sysreg.flags = sysreg_flags;
7375 break;
7376 }
7377
7378 case AARCH64_OPND_SYSREG_IC:
7379 inst.base.operands[i].sysins_op =
7380 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7381 goto sys_reg_ins;
7382
7383 case AARCH64_OPND_SYSREG_DC:
7384 inst.base.operands[i].sysins_op =
7385 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7386 goto sys_reg_ins;
7387
7388 case AARCH64_OPND_SYSREG_AT:
7389 inst.base.operands[i].sysins_op =
7390 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7391 goto sys_reg_ins;
7392
7393 case AARCH64_OPND_SYSREG_SR:
7394 inst.base.operands[i].sysins_op =
7395 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7396 goto sys_reg_ins;
7397
7398 case AARCH64_OPND_SYSREG_TLBI:
7399 inst.base.operands[i].sysins_op =
7400 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7401 sys_reg_ins:
7402 if (inst.base.operands[i].sysins_op == NULL)
7403 {
7404 set_fatal_syntax_error ( _("unknown or missing operation name"));
7405 goto failure;
7406 }
7407 break;
7408
7409 case AARCH64_OPND_BARRIER:
7410 case AARCH64_OPND_BARRIER_ISB:
7411 val = parse_barrier (&str);
7412 if (val != PARSE_FAIL
7413 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7414 {
7415 /* ISB only accepts options name 'sy'. */
7416 set_syntax_error
7417 (_("the specified option is not accepted in ISB"));
7418 /* Turn off backtrack as this optional operand is present. */
7419 backtrack_pos = 0;
7420 goto failure;
7421 }
7422 if (val != PARSE_FAIL
7423 && operands[i] == AARCH64_OPND_BARRIER)
7424 {
7425 /* Regular barriers accept options CRm (C0-C15).
7426 DSB nXS barrier variant accepts values > 15. */
7427 if (val < 0 || val > 15)
7428 {
7429 set_syntax_error (_("the specified option is not accepted in DSB"));
7430 goto failure;
7431 }
7432 }
7433 /* This is an extension to accept a 0..15 immediate. */
7434 if (val == PARSE_FAIL)
7435 po_imm_or_fail (0, 15);
7436 info->barrier = aarch64_barrier_options + val;
7437 break;
7438
7439 case AARCH64_OPND_BARRIER_DSB_NXS:
7440 val = parse_barrier (&str);
7441 if (val != PARSE_FAIL)
7442 {
7443 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7444 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7445 {
7446 set_syntax_error (_("the specified option is not accepted in DSB"));
7447 /* Turn off backtrack as this optional operand is present. */
7448 backtrack_pos = 0;
7449 goto failure;
7450 }
7451 }
7452 else
7453 {
7454 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7455 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7456 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7457 goto failure;
7458 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7459 {
7460 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7461 goto failure;
7462 }
7463 }
7464 /* Option index is encoded as 2-bit value in val<3:2>. */
7465 val = (val >> 2) - 4;
7466 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7467 break;
7468
7469 case AARCH64_OPND_PRFOP:
7470 val = parse_pldop (&str);
7471 /* This is an extension to accept a 0..31 immediate. */
7472 if (val == PARSE_FAIL)
7473 po_imm_or_fail (0, 31);
7474 inst.base.operands[i].prfop = aarch64_prfops + val;
7475 break;
7476
7477 case AARCH64_OPND_BARRIER_PSB:
7478 val = parse_barrier_psb (&str, &(info->hint_option));
7479 if (val == PARSE_FAIL)
7480 goto failure;
7481 break;
7482
7483 case AARCH64_OPND_BTI_TARGET:
7484 val = parse_bti_operand (&str, &(info->hint_option));
7485 if (val == PARSE_FAIL)
7486 goto failure;
7487 break;
7488
7489 case AARCH64_OPND_SME_ZAda_2b:
7490 case AARCH64_OPND_SME_ZAda_3b:
7491 reg = parse_reg_with_qual (&str, REG_TYPE_ZAT, &qualifier);
7492 if (!reg)
7493 goto failure;
7494 info->reg.regno = reg->number;
7495 info->qualifier = qualifier;
7496 break;
7497
7498 case AARCH64_OPND_SME_ZA_HV_idx_src:
7499 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7500 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7501 {
7502 enum sme_hv_slice slice_indicator;
7503 int vector_select_register;
7504 int imm;
7505
7506 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7507 val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7508 &slice_indicator,
7509 &vector_select_register,
7510 &imm,
7511 &qualifier);
7512 else
7513 val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7514 &vector_select_register,
7515 &imm,
7516 &qualifier);
7517 if (val == PARSE_FAIL)
7518 goto failure;
7519 info->za_tile_vector.regno = val;
7520 info->za_tile_vector.index.regno = vector_select_register;
7521 info->za_tile_vector.index.imm = imm;
7522 info->za_tile_vector.v = slice_indicator;
7523 info->qualifier = qualifier;
7524 break;
7525 }
7526
7527 case AARCH64_OPND_SME_list_of_64bit_tiles:
7528 val = parse_sme_list_of_64bit_tiles (&str);
7529 if (val == PARSE_FAIL)
7530 goto failure;
7531 info->imm.value = val;
7532 break;
7533
7534 case AARCH64_OPND_SME_ZA_array:
7535 {
7536 int imm;
7537 val = parse_sme_za_array (&str, &imm);
7538 if (val == PARSE_FAIL)
7539 goto failure;
7540 info->za_tile_vector.index.regno = val;
7541 info->za_tile_vector.index.imm = imm;
7542 break;
7543 }
7544
7545 case AARCH64_OPND_MOPS_ADDR_Rd:
7546 case AARCH64_OPND_MOPS_ADDR_Rs:
7547 po_char_or_fail ('[');
7548 if (!parse_x0_to_x30 (&str, info))
7549 goto failure;
7550 po_char_or_fail (']');
7551 po_char_or_fail ('!');
7552 break;
7553
7554 case AARCH64_OPND_MOPS_WB_Rn:
7555 if (!parse_x0_to_x30 (&str, info))
7556 goto failure;
7557 po_char_or_fail ('!');
7558 break;
7559
7560 default:
7561 as_fatal (_("unhandled operand code %d"), operands[i]);
7562 }
7563
7564 /* If we get here, this operand was successfully parsed. */
7565 inst.base.operands[i].present = 1;
7566 continue;
7567
7568 failure:
7569 /* The parse routine should already have set the error, but in case
7570 not, set a default one here. */
7571 if (! error_p ())
7572 set_default_error ();
7573
7574 if (! backtrack_pos)
7575 goto parse_operands_return;
7576
7577 {
7578 /* We reach here because this operand is marked as optional, and
7579 either no operand was supplied or the operand was supplied but it
7580 was syntactically incorrect. In the latter case we report an
7581 error. In the former case we perform a few more checks before
7582 dropping through to the code to insert the default operand. */
7583
7584 char *tmp = backtrack_pos;
7585 char endchar = END_OF_INSN;
7586
7587 if (i != (aarch64_num_of_operands (opcode) - 1))
7588 endchar = ',';
7589 skip_past_char (&tmp, ',');
7590
7591 if (*tmp != endchar)
7592 /* The user has supplied an operand in the wrong format. */
7593 goto parse_operands_return;
7594
7595 /* Make sure there is not a comma before the optional operand.
7596 For example the fifth operand of 'sys' is optional:
7597
7598 sys #0,c0,c0,#0, <--- wrong
7599 sys #0,c0,c0,#0 <--- correct. */
7600 if (comma_skipped_p && i && endchar == END_OF_INSN)
7601 {
7602 set_fatal_syntax_error
7603 (_("unexpected comma before the omitted optional operand"));
7604 goto parse_operands_return;
7605 }
7606 }
7607
7608 /* Reaching here means we are dealing with an optional operand that is
7609 omitted from the assembly line. */
7610 gas_assert (optional_operand_p (opcode, i));
7611 info->present = 0;
7612 process_omitted_operand (operands[i], opcode, i, info);
7613
7614 /* Try again, skipping the optional operand at backtrack_pos. */
7615 str = backtrack_pos;
7616 backtrack_pos = 0;
7617
7618 /* Clear any error record after the omitted optional operand has been
7619 successfully handled. */
7620 clear_error ();
7621 }
7622
7623 /* Check if we have parsed all the operands. */
7624 if (*str != '\0' && ! error_p ())
7625 {
7626 /* Set I to the index of the last present operand; this is
7627 for the purpose of diagnostics. */
7628 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7629 ;
7630 set_fatal_syntax_error
7631 (_("unexpected characters following instruction"));
7632 }
7633
7634 parse_operands_return:
7635
7636 if (error_p ())
7637 {
7638 inst.parsing_error.index = i;
7639 DEBUG_TRACE ("parsing FAIL: %s - %s",
7640 operand_mismatch_kind_names[inst.parsing_error.kind],
7641 inst.parsing_error.error);
7642 /* Record the operand error properly; this is useful when there
7643 are multiple instruction templates for a mnemonic name, so that
7644 later on, we can select the error that most closely describes
7645 the problem. */
7646 record_operand_error_info (opcode, &inst.parsing_error);
7647 return false;
7648 }
7649 else
7650 {
7651 DEBUG_TRACE ("parsing SUCCESS");
7652 return true;
7653 }
7654 }
7655
7656 /* It does some fix-up to provide some programmer friendly feature while
7657 keeping the libopcodes happy, i.e. libopcodes only accepts
7658 the preferred architectural syntax.
7659 Return FALSE if there is any failure; otherwise return TRUE. */
7660
7661 static bool
7662 programmer_friendly_fixup (aarch64_instruction *instr)
7663 {
7664 aarch64_inst *base = &instr->base;
7665 const aarch64_opcode *opcode = base->opcode;
7666 enum aarch64_op op = opcode->op;
7667 aarch64_opnd_info *operands = base->operands;
7668
7669 DEBUG_TRACE ("enter");
7670
7671 switch (opcode->iclass)
7672 {
7673 case testbranch:
7674 /* TBNZ Xn|Wn, #uimm6, label
7675 Test and Branch Not Zero: conditionally jumps to label if bit number
7676 uimm6 in register Xn is not zero. The bit number implies the width of
7677 the register, which may be written and should be disassembled as Wn if
7678 uimm is less than 32. */
7679 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
7680 {
7681 if (operands[1].imm.value >= 32)
7682 {
7683 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
7684 0, 31);
7685 return false;
7686 }
7687 operands[0].qualifier = AARCH64_OPND_QLF_X;
7688 }
7689 break;
7690 case loadlit:
7691 /* LDR Wt, label | =value
7692 As a convenience assemblers will typically permit the notation
7693 "=value" in conjunction with the pc-relative literal load instructions
7694 to automatically place an immediate value or symbolic address in a
7695 nearby literal pool and generate a hidden label which references it.
7696 ISREG has been set to 0 in the case of =value. */
7697 if (instr->gen_lit_pool
7698 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
7699 {
7700 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
7701 if (op == OP_LDRSW_LIT)
7702 size = 4;
7703 if (instr->reloc.exp.X_op != O_constant
7704 && instr->reloc.exp.X_op != O_big
7705 && instr->reloc.exp.X_op != O_symbol)
7706 {
7707 record_operand_error (opcode, 1,
7708 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
7709 _("constant expression expected"));
7710 return false;
7711 }
7712 if (! add_to_lit_pool (&instr->reloc.exp, size))
7713 {
7714 record_operand_error (opcode, 1,
7715 AARCH64_OPDE_OTHER_ERROR,
7716 _("literal pool insertion failed"));
7717 return false;
7718 }
7719 }
7720 break;
7721 case log_shift:
7722 case bitfield:
7723 /* UXT[BHW] Wd, Wn
7724 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
7725 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
7726 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
7727 A programmer-friendly assembler should accept a destination Xd in
7728 place of Wd, however that is not the preferred form for disassembly.
7729 */
7730 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
7731 && operands[1].qualifier == AARCH64_OPND_QLF_W
7732 && operands[0].qualifier == AARCH64_OPND_QLF_X)
7733 operands[0].qualifier = AARCH64_OPND_QLF_W;
7734 break;
7735
7736 case addsub_ext:
7737 {
7738 /* In the 64-bit form, the final register operand is written as Wm
7739 for all but the (possibly omitted) UXTX/LSL and SXTX
7740 operators.
7741 As a programmer-friendly assembler, we accept e.g.
7742 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
7743 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
7744 int idx = aarch64_operand_index (opcode->operands,
7745 AARCH64_OPND_Rm_EXT);
7746 gas_assert (idx == 1 || idx == 2);
7747 if (operands[0].qualifier == AARCH64_OPND_QLF_X
7748 && operands[idx].qualifier == AARCH64_OPND_QLF_X
7749 && operands[idx].shifter.kind != AARCH64_MOD_LSL
7750 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
7751 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
7752 operands[idx].qualifier = AARCH64_OPND_QLF_W;
7753 }
7754 break;
7755
7756 default:
7757 break;
7758 }
7759
7760 DEBUG_TRACE ("exit with SUCCESS");
7761 return true;
7762 }
7763
/* Check for loads and stores that will cause unpredictable behavior.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 For pair forms, operand 2 is the address and operands 0/1 are the
	 transfer registers.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 Bit 22 of the encoding distinguishes the load forms here.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  /* Bit 22 set: load-exclusive forms.  */
	  if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.
		 Bit 21 set selects the pair variants.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7859
7860 static void
7861 force_automatic_sequence_close (void)
7862 {
7863 struct aarch64_segment_info_type *tc_seg_info;
7864
7865 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7866 if (tc_seg_info->insn_sequence.instr)
7867 {
7868 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7869 _("previous `%s' sequence has not been closed"),
7870 tc_seg_info->insn_sequence.instr->opcode->name);
7871 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7872 }
7873 }
7874
7875 /* A wrapper function to interface with libopcodes on encoding and
7876 record the error message if there is any.
7877
7878 Return TRUE on success; otherwise return FALSE. */
7879
7880 static bool
7881 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7882 aarch64_insn *code)
7883 {
7884 aarch64_operand_error error_info;
7885 memset (&error_info, '\0', sizeof (error_info));
7886 error_info.kind = AARCH64_OPDE_NIL;
7887 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7888 && !error_info.non_fatal)
7889 return true;
7890
7891 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7892 record_operand_error_info (opcode, &error_info);
7893 return error_info.non_fatal;
7894 }
7895
#ifdef DEBUG_AARCH64
/* Debug helper: print one line per operand of OPCODE, preferring the
   operand's name and falling back to its description when the name is
   empty.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  for (int i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *text = aarch64_get_operand_name (opcode->operands[i]);
      if (text[0] == '\0')
	text = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, text);
    }
}
#endif /* DEBUG_AARCH64 */
7911
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  Record the first '.' (condition suffix
     separator, e.g. b.eq) in DOT.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* A register alias definition (e.g. via .req-style syntax) consumes
     the line and emits no instruction.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed from the mnemonic across the reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, apply programmer-friendly transformations, then encode;
	 the first template that succeeds wins.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; reset parser state before trying the next
	 opcode entry with the same mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8067
8068 /* Various frobbings of labels and their addresses. */
8069
/* Hook run at the start of each input line: forget the label from the
   previous line so md_assemble only re-aligns labels on this line.  */

void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
8075
/* Hook run when a label SYM is defined: remember it for alignment in
   md_assemble and emit a DWARF entry for it.  */

void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8083
/* Hook run when leaving a section: close any instruction sequence that
   is still open in the current segment.  */

void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
8090
8091 int
8092 aarch64_data_in_code (void)
8093 {
8094 if (startswith (input_line_pointer + 1, "data:"))
8095 {
8096 *input_line_pointer = '/';
8097 input_line_pointer += 5;
8098 *input_line_pointer = 0;
8099 return 1;
8100 }
8101
8102 return 0;
8103 }
8104
/* Canonicalize NAME by stripping a trailing "/data" suffix (the form
   produced by aarch64_data_in_code), if present.  Modifies NAME in
   place and returns it.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  /* Use size_t for the length: strlen returns size_t, and storing it in
     an int mixes signed/unsigned arithmetic and can truncate.  */
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8115 \f
8116 /* Table of all register names defined by default. The user can
8117 define additional names with .req. Note that all register names
8118 should appear in both upper and lowercase variants. Some registers
8119 also have mixed-case names. */
8120
8121 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8122 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8123 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8124 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8125 #define REGSET16(p,t) \
8126 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8127 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8128 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8129 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8130 #define REGSET16S(p,s,t) \
8131 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8132 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8133 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8134 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8135 #define REGSET31(p,t) \
8136 REGSET16(p, t), \
8137 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8138 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8139 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8140 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8141 #define REGSET(p,t) \
8142 REGSET31(p,t), REGNUM(p,31,t)
8143
8144 /* These go into aarch64_reg_hsh hash-table. */
8145 static const reg_entry reg_names[] = {
8146 /* Integer registers. */
8147 REGSET31 (x, R_64), REGSET31 (X, R_64),
8148 REGSET31 (w, R_32), REGSET31 (W, R_32),
8149
8150 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
8151 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
8152 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
8153 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
8154 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
8155 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
8156
8157 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
8158 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
8159
8160 /* Floating-point single precision registers. */
8161 REGSET (s, FP_S), REGSET (S, FP_S),
8162
8163 /* Floating-point double precision registers. */
8164 REGSET (d, FP_D), REGSET (D, FP_D),
8165
8166 /* Floating-point half precision registers. */
8167 REGSET (h, FP_H), REGSET (H, FP_H),
8168
8169 /* Floating-point byte precision registers. */
8170 REGSET (b, FP_B), REGSET (B, FP_B),
8171
8172 /* Floating-point quad precision registers. */
8173 REGSET (q, FP_Q), REGSET (Q, FP_Q),
8174
8175 /* FP/SIMD registers. */
8176 REGSET (v, VN), REGSET (V, VN),
8177
8178 /* SVE vector registers. */
8179 REGSET (z, ZN), REGSET (Z, ZN),
8180
8181 /* SVE predicate registers. */
8182 REGSET16 (p, PN), REGSET16 (P, PN),
8183
8184 /* SME ZA tile registers. */
8185 REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),
8186
8187 /* SME ZA tile registers (horizontal slice). */
8188 REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),
8189
8190 /* SME ZA tile registers (vertical slice). */
8191 REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV)
8192 };
8193
8194 #undef REGDEF
8195 #undef REGDEF_ALIAS
8196 #undef REGNUM
8197 #undef REGSET16
8198 #undef REGSET31
8199 #undef REGSET
8200
/* Single-bit flag values used to assemble the 4-bit NZCV immediate:
   an upper-case letter means the corresponding flag bit is set, a
   lower-case letter means it is clear.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the N, Z, C and V bits (in that order, N most significant) into
   the 4-bit value used in the instruction encoding.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All sixteen case combinations of the "nzcv" operand spelling, each
   mapped to the mask derived from which letters are upper-case.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8238 \f
8239 /* MD interface: bits in the object file. */
8240
8241 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8242 for use in the a.out file, and stores them in the array pointed to by buf.
8243 This knows about the endian-ness of the target machine and does
8244 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8245 2 (short) and 4 (long) Floating numbers are put out as a series of
8246 LITTLENUMS (shorts, here at least). */
8247
8248 void
8249 md_number_to_chars (char *buf, valueT val, int n)
8250 {
8251 if (target_big_endian)
8252 number_to_chars_bigendian (buf, val, n);
8253 else
8254 number_to_chars_littleendian (buf, val, n);
8255 }
8256
8257 /* MD interface: Sections. */
8258
/* Estimate the size of a frag before relaxing.  Assume everything fits in
   4 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Both the variable part and the returned estimate are a fixed four
     bytes (one instruction word).  */
  fragp->fr_var = 4;
  return 4;
}
8268
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No target-specific rounding is needed; return SIZE unchanged.  */
  return size;
}
8276
8277 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
8278 of an rs_align_code fragment.
8279
8280 Here we fill the frag with the appropriate info for padding the
8281 output stream. The resulting frag will consist of a fixed (fr_fix)
8282 and of a repeating (fr_var) part.
8283
8284 The fixed content is always emitted before the repeating content and
8285 these two parts are used as follows in constructing the output:
8286 - the fixed part will be used to align to a valid instruction word
8287 boundary, in case that we start at a misaligned address; as no
8288 executable instruction can live at the misaligned location, we
8289 simply fill with zeros;
8290 - the variable part will be used to cover the remaining padding and
8291 we fill using the AArch64 NOP instruction.
8292
8293 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
8294 enough storage space for up to 3 bytes for padding the back to a valid
8295 instruction alignment and exactly 4 bytes to store the NOP pattern. */
8296
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding between the end of this frag's fixed part and the
     start of the next frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Zero-fill up to the next instruction-word boundary (see the block
     comment above: no instruction can start misaligned).  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating (fr_var) part is a single NOP pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8334
8335 /* Perform target specific initialisation of a frag.
8336 Note - despite the name this initialisation is not done when the frag
8337 is created, but only when its type is assigned. A frag can be created
8338 and used a long time before its type is set, so beware of assuming that
8339 this initialisation is performed first. */
8340
8341 #ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets.  */
}
8347
8348 #else /* OBJ_ELF is defined. */
8349 void
8350 aarch64_init_frag (fragS * fragP, int max_chars)
8351 {
8352 /* Record a mapping symbol for alignment frags. We will delete this
8353 later if the alignment ends up empty. */
8354 if (!fragP->tc_frag_data.recorded)
8355 fragP->tc_frag_data.recorded = 1;
8356
8357 /* PR 21809: Do not set a mapping state for debug sections
8358 - it just confuses other tools. */
8359 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
8360 return;
8361
8362 switch (fragP->fr_type)
8363 {
8364 case rs_align_test:
8365 case rs_fill:
8366 mapping_state_2 (MAP_DATA, max_chars);
8367 break;
8368 case rs_align:
8369 /* PR 20364: We can get alignment frags in code sections,
8370 so do not just assume that we should use the MAP_DATA state. */
8371 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
8372 break;
8373 case rs_align_code:
8374 mapping_state_2 (MAP_INSN, max_chars);
8375 break;
8376 default:
8377 break;
8378 }
8379 }
8380
8381 /* Whether SFrame stack trace info is supported. */
8382
8383 bool
8384 aarch64_support_sframe_p (void)
8385 {
8386 /* At this time, SFrame is supported for aarch64 only. */
8387 return (aarch64_abi == AARCH64_ABI_LP64);
8388 }
8389
/* Specify if RA tracking is needed.  */

bool
aarch64_sframe_ra_tracking_p (void)
{
  /* The return address is always tracked for AArch64 SFrame.  */
  return true;
}
8397
/* Specify the fixed offset to recover RA from CFA.
   (useful only when RA tracking is not needed).  */

offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  /* Not meaningful here, since aarch64_sframe_ra_tracking_p above
     always returns true.  */
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8406
8407 /* Get the abi/arch indentifier for SFrame. */
8408
8409 unsigned char
8410 aarch64_sframe_get_abi_arch (void)
8411 {
8412 unsigned char sframe_abi_arch = 0;
8413
8414 if (aarch64_support_sframe_p ())
8415 {
8416 sframe_abi_arch = target_big_endian
8417 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8418 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8419 }
8420
8421 return sframe_abi_arch;
8422 }
8423
8424 #endif /* OBJ_ELF */
8425 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* At function entry the CFA is defined as SP + 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8433
8434 /* Convert REGNAME to a DWARF-2 register number. */
8435
8436 int
8437 tc_aarch64_regname_to_dw2regnum (char *regname)
8438 {
8439 const reg_entry *reg = parse_reg (&regname);
8440 if (reg == NULL)
8441 return -1;
8442
8443 switch (reg->type)
8444 {
8445 case REG_TYPE_SP_32:
8446 case REG_TYPE_SP_64:
8447 case REG_TYPE_R_32:
8448 case REG_TYPE_R_64:
8449 return reg->number;
8450
8451 case REG_TYPE_FP_B:
8452 case REG_TYPE_FP_H:
8453 case REG_TYPE_FP_S:
8454 case REG_TYPE_FP_D:
8455 case REG_TYPE_FP_Q:
8456 return reg->number + 64;
8457
8458 default:
8459 break;
8460 }
8461 return -1;
8462 }
8463
8464 /* Implement DWARF2_ADDR_SIZE. */
8465
8466 int
8467 aarch64_dwarf2_addr_size (void)
8468 {
8469 if (ilp32_p)
8470 return 4;
8471 else if (llp64_p)
8472 return 8;
8473 return bfd_arch_bits_per_address (stdoutput) / 8;
8474 }
8475
8476 /* MD interface: Symbol and relocation handling. */
8477
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For AArch64 PC-relative fixups applied to instructions
   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.
   FIXP is the fixup being resolved; SEG is the segment it lives in.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Address of the fixup within the output: frag address plus offset.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
8498
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character pre-check before the full string comparison.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      /* Create the GOT symbol lazily, on first reference.  */
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
8524
8525 /* Return non-zero if the indicated VALUE has overflowed the maximum
8526 range expressible by a unsigned number with the indicated number of
8527 BITS. */
8528
8529 static bool
8530 unsigned_overflow (valueT value, unsigned bits)
8531 {
8532 valueT lim;
8533 if (bits >= sizeof (valueT) * 8)
8534 return false;
8535 lim = (valueT) 1 << bits;
8536 return (value >= lim);
8537 }
8538
8539
8540 /* Return non-zero if the indicated VALUE has overflowed the maximum
8541 range expressible by an signed number with the indicated number of
8542 BITS. */
8543
8544 static bool
8545 signed_overflow (offsetT value, unsigned bits)
8546 {
8547 offsetT lim;
8548 if (bits >= sizeof (offsetT) * 8)
8549 return false;
8550 lim = (offsetT) 1 << (bits - 1);
8551 return (value < -lim || value >= lim);
8552 }
8553
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; OP_NIL
     means there is no counterpart and the conversion is impossible.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Attempt the actual encoding with the replacement opcode.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8616
8617 /* Called by fix_insn to fix a MOV immediate alias instruction.
8618
8619 Operand for a generic move immediate instruction, which is an alias
8620 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
8621 a 32-bit/64-bit immediate value into general register. An assembler error
8622 shall result if the immediate cannot be created by a single one of these
8623 instructions. If there is a choice, then to ensure reversability an
8624 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
8625
8626 static void
8627 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
8628 {
8629 const aarch64_opcode *opcode;
8630
8631 /* Need to check if the destination is SP/ZR. The check has to be done
8632 before any aarch64_replace_opcode. */
8633 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
8634 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
8635
8636 instr->operands[1].imm.value = value;
8637 instr->operands[1].skip = 0;
8638
8639 if (try_mov_wide_p)
8640 {
8641 /* Try the MOVZ alias. */
8642 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
8643 aarch64_replace_opcode (instr, opcode);
8644 if (aarch64_opcode_encode (instr->opcode, instr,
8645 &instr->value, NULL, NULL, insn_sequence))
8646 {
8647 put_aarch64_insn (buf, instr->value);
8648 return;
8649 }
8650 /* Try the MOVK alias. */
8651 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
8652 aarch64_replace_opcode (instr, opcode);
8653 if (aarch64_opcode_encode (instr->opcode, instr,
8654 &instr->value, NULL, NULL, insn_sequence))
8655 {
8656 put_aarch64_insn (buf, instr->value);
8657 return;
8658 }
8659 }
8660
8661 if (try_mov_bitmask_p)
8662 {
8663 /* Try the ORR alias. */
8664 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
8665 aarch64_replace_opcode (instr, opcode);
8666 if (aarch64_opcode_encode (instr->opcode, instr,
8667 &instr->value, NULL, NULL, insn_sequence))
8668 {
8669 put_aarch64_insn (buf, instr->value);
8670 return;
8671 }
8672 }
8673
8674 as_bad_where (fixP->fx_file, fixP->fx_line,
8675 _("immediate cannot be moved by a single instruction"));
8676 }
8677
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov     w0, u32
     .set    u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Address of the instruction bytes being patched inside the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit unsigned immediate for SVC/HVC/SMC/BRK/UDF etc.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2  2 21111 111111
		  1  098|7654|3 2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2  221111111111
		  1  098|7654|3 2  109876543210 98765 43210
	 11000000 sf 001|0001|shift    imm12    Rn    Rd    ADD
	 31000000 sf 011|0001|shift    imm12    Rn    Rd    ADDS
	 51000000 sf 101|0001|shift    imm12    Rn    Rd    SUB
	 71000000 sf 111|0001|shift    imm12    Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-run the full encoder with the resolved
	 value, since logical immediates cannot be patched field-wise.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* Scaled offset did not fit; the unscaled (LDUR/STUR style)
	     form may still accommodate it.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8856
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* Address of the bytes being patched inside the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* The 19-bit literal-load offset is word-scaled.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* The MOVW relocation groups select which 16-bit chunk of the value
       goes into a MOVZ/MOVN/MOVK; SCALE is the right-shift applied.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction.  */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Resolve the generic reloc to the ABI-specific (ILP32/LP64) one.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    case BFD_RELOC_RVA:
    case BFD_RELOC_32_SECREL:
    case BFD_RELOC_16_SECIDX:
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated the struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually use
     this field, so the impact on the performance should be minimal .  */
  free (fixP->tc_fix_data.inst);

  return;
}
9249
9250 /* Translate internal representation of relocation info to BFD target
9251 format. */
9252
9253 arelent *
9254 tc_gen_reloc (asection * section, fixS * fixp)
9255 {
9256 arelent *reloc;
9257 bfd_reloc_code_real_type code;
9258
9259 reloc = XNEW (arelent);
9260
9261 reloc->sym_ptr_ptr = XNEW (asymbol *);
9262 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9263 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9264
9265 if (fixp->fx_pcrel)
9266 {
9267 if (section->use_rela_p)
9268 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9269 else
9270 fixp->fx_offset = reloc->address;
9271 }
9272 reloc->addend = fixp->fx_offset;
9273
9274 code = fixp->fx_r_type;
9275 switch (code)
9276 {
9277 case BFD_RELOC_16:
9278 if (fixp->fx_pcrel)
9279 code = BFD_RELOC_16_PCREL;
9280 break;
9281
9282 case BFD_RELOC_32:
9283 if (fixp->fx_pcrel)
9284 code = BFD_RELOC_32_PCREL;
9285 break;
9286
9287 case BFD_RELOC_64:
9288 if (fixp->fx_pcrel)
9289 code = BFD_RELOC_64_PCREL;
9290 break;
9291
9292 default:
9293 break;
9294 }
9295
9296 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9297 if (reloc->howto == NULL)
9298 {
9299 as_bad_where (fixp->fx_file, fixp->fx_line,
9300 _
9301 ("cannot represent %s relocation in this object file format"),
9302 bfd_get_reloc_code_name (code));
9303 return NULL;
9304 }
9305
9306 return reloc;
9307 }
9308
/* This fix_new is called by cons via TC_CONS_FIX_NEW.  */

void
cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
{
  bfd_reloc_code_real_type type;
  int pcrel = 0;

#ifdef TE_PE
  /* PE targets support .secrel32 / .secidx expressions in data
     directives; rewrite the expression into a plain symbol reference
     carrying the matching relocation type.  */
  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_32_SECREL;
    }
  else if (exp->X_op == O_secidx)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_16_SECIDX;
    }
  else
    {
#endif
      /* Pick a reloc.
	 FIXME: @@ Should look at CPU word size.  */
      switch (size)
	{
	case 1:
	  type = BFD_RELOC_8;
	  break;
	case 2:
	  type = BFD_RELOC_16;
	  break;
	case 4:
	  type = BFD_RELOC_32;
	  break;
	case 8:
	  type = BFD_RELOC_64;
	  break;
	default:
	  as_bad (_("cannot do %u-byte relocation"), size);
	  type = BFD_RELOC_UNUSED;
	  break;
	}
#ifdef TE_PE
    }
#endif

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}
9358
9359 /* Implement md_after_parse_args. This is the earliest time we need to decide
9360 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9361
9362 void
9363 aarch64_after_parse_args (void)
9364 {
9365 if (aarch64_abi != AARCH64_ABI_NONE)
9366 return;
9367
9368 #ifdef OBJ_ELF
9369 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9370 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9371 aarch64_abi = AARCH64_ABI_ILP32;
9372 else
9373 aarch64_abi = AARCH64_ABI_LP64;
9374 #else
9375 aarch64_abi = AARCH64_ABI_LLP64;
9376 #endif
9377 }
9378
9379 #ifdef OBJ_ELF
9380 const char *
9381 elf64_aarch64_target_format (void)
9382 {
9383 #ifdef TE_CLOUDABI
9384 /* FIXME: What to do for ilp32_p ? */
9385 if (target_big_endian)
9386 return "elf64-bigaarch64-cloudabi";
9387 else
9388 return "elf64-littleaarch64-cloudabi";
9389 #else
9390 if (target_big_endian)
9391 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9392 else
9393 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9394 #endif
9395 }
9396
/* Hook called while writing out each symbol; defer to the generic ELF
   symbol frobbing.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9402 #elif defined OBJ_COFF
/* Return the BFD target name used for PE/COFF AArch64 output.  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9408 #endif
9409
9410 /* MD interface: Finalization. */
9411
9412 /* A good place to do this, although this was probably not intended
9413 for this kind of use. We need to dump the literal pool before
9414 references are made to a null symbol pointer. */
9415
9416 void
9417 aarch64_cleanup (void)
9418 {
9419 literal_pool *pool;
9420
9421 for (pool = list_of_pools; pool; pool = pool->next)
9422 {
9423 /* Put it at the end of the relevant section. */
9424 subseg_set (pool->section, pool->sub_section);
9425 s_ltorg (0);
9426 }
9427 }
9428
9429 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to prune in a section with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly at the end of FRAGP; scan forward through any
	 empty frags to decide whether a later mapping symbol makes it
	 redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
	         one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
	         it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9493 #endif
9494
/* Adjust the symbol table.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9507
/* Insert KEY/VALUE into TABLE without replacing an existing entry
   (the final 0 argument to str_hash_insert).  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9513
/* Insert a system-register entry, asserting that the name fits within
   the fixed parsing buffer bound AARCH64_MAX_SYSREG_NAME_LEN.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9520
9521 static void
9522 fill_instruction_hash_table (void)
9523 {
9524 const aarch64_opcode *opcode = aarch64_opcode_table;
9525
9526 while (opcode->name != NULL)
9527 {
9528 templates *templ, *new_templ;
9529 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9530
9531 new_templ = XNEW (templates);
9532 new_templ->opcode = opcode;
9533 new_templ->next = NULL;
9534
9535 if (!templ)
9536 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9537 else
9538 {
9539 new_templ->next = templ->next;
9540 templ->next = new_templ;
9541 }
9542 ++opcode;
9543 }
9544 }
9545
/* Copy at most NUM characters of the NUL-terminated string SRC into DST,
   converting each to upper case, and NUL-terminate DST.  DST must have
   room for NUM + 1 characters.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t i;
  /* Use size_t for the counter: the previous "unsigned int" counter
     could wrap before reaching NUM on hosts where size_t is wider than
     unsigned int, looping forever.  */
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
9554
9555 /* Assume STR point to a lower-case string, allocate, convert and return
9556 the corresponding upper-case string. */
9557 static inline const char*
9558 get_upper_str (const char *str)
9559 {
9560 char *ret;
9561 size_t len = strlen (str);
9562 ret = XNEWVEC (char, len + 1);
9563 convert_to_upper (ret, str, len);
9564 return ret;
9565 }
9566
/* MD interface: Initialization.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create all the lookup tables used during parsing: mnemonics,
     condition codes, shift/extend modifiers, system registers and
     operations, register names, flag sets, prefetch and hint options.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if(ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#ifdef OBJ_ELF
  /* FIXME - is there a better way to do it ?  */
  aarch64_sframe_cfa_sp_reg = 31;
  aarch64_sframe_cfa_fp_reg = 29; /* x29.  */
  aarch64_sframe_cfa_ra_reg = 30;
#endif
}
9738
/* Command line processing.  */

/* Single-letter options: only -m<...> takes an argument.  */
const char *md_shortopts = "m:";

/* Define only the endianness selector(s) this build can honour:
   both for bi-endian targets, otherwise just the configured one.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9765
/* Table-driven simple flag options: each entry sets *VAR to VALUE when
   the option string matches (see the default case of md_parse_option).  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9788
/* One entry per -mcpu= value: the feature set enabled by that CPU name.
   VALUE is the base architecture merged with the CPU's optional
   extensions via AARCH64_FEATURE.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  /* "all" enables everything; the .cpu/.arch directives deliberately
     skip this entry (see s_aarch64_cpu / s_aarch64_arch).  */
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
			    "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				 "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
			      "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
				   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
				   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
				   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
			       "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
			       "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				"Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
			      "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
				 "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
				 "Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9957
/* One entry per -march= value: the feature set implied by the
   architecture name alone (extensions are parsed separately).  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  /* "all" enables everything; skipped by the .arch directive.  */
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
9984
/* ISA extensions.  */
/* One entry per "+ext" modifier.  VALUE is the feature bit(s) the
   extension enables; REQUIRE lists the features it depends on, used by
   aarch64_feature_enable_set / aarch64_feature_disable_set to compute
   transitive closures when adding or removing extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  /* "sme-f64" is an alias for "sme-f64f64": both map to the same
     feature bit.  Likewise "sme-i64" / "sme-i16i64" below.  */
  {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-f64f64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i16i64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {"mops",		AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
			AARCH64_ARCH_NONE},
  {"hbc",		AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
			AARCH64_ARCH_NONE},
  {"cssc",		AARCH64_FEATURE (AARCH64_FEATURE_CSSC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
10093
/* Long options of the form "-mfoo=bar": OPTION (including the '=') is
   matched as a prefix of the argument and FUNC is called with the
   remainder (see md_parse_option).  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
10101
10102 /* Transitive closure of features depending on set. */
10103 static aarch64_feature_set
10104 aarch64_feature_disable_set (aarch64_feature_set set)
10105 {
10106 const struct aarch64_option_cpu_value_table *opt;
10107 aarch64_feature_set prev = 0;
10108
10109 while (prev != set) {
10110 prev = set;
10111 for (opt = aarch64_features; opt->name != NULL; opt++)
10112 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10113 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10114 }
10115 return set;
10116 }
10117
10118 /* Transitive closure of dependencies of set. */
10119 static aarch64_feature_set
10120 aarch64_feature_enable_set (aarch64_feature_set set)
10121 {
10122 const struct aarch64_option_cpu_value_table *opt;
10123 aarch64_feature_set prev = 0;
10124
10125 while (prev != set) {
10126 prev = set;
10127 for (opt = aarch64_features; opt->name != NULL; opt++)
10128 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10129 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10130 }
10131 return set;
10132 }
10133
10134 static int
10135 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10136 bool ext_only)
10137 {
10138 /* We insist on extensions being added before being removed. We achieve
10139 this by using the ADDING_VALUE variable to indicate whether we are
10140 adding an extension (1) or removing it (0) and only allowing it to
10141 change in the order -1 -> 1 -> 0. */
10142 int adding_value = -1;
10143 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10144
10145 /* Copy the feature set, so that we can modify it. */
10146 *ext_set = **opt_p;
10147 *opt_p = ext_set;
10148
10149 while (str != NULL && *str != 0)
10150 {
10151 const struct aarch64_option_cpu_value_table *opt;
10152 const char *ext = NULL;
10153 int optlen;
10154
10155 if (!ext_only)
10156 {
10157 if (*str != '+')
10158 {
10159 as_bad (_("invalid architectural extension"));
10160 return 0;
10161 }
10162
10163 ext = strchr (++str, '+');
10164 }
10165
10166 if (ext != NULL)
10167 optlen = ext - str;
10168 else
10169 optlen = strlen (str);
10170
10171 if (optlen >= 2 && startswith (str, "no"))
10172 {
10173 if (adding_value != 0)
10174 adding_value = 0;
10175 optlen -= 2;
10176 str += 2;
10177 }
10178 else if (optlen > 0)
10179 {
10180 if (adding_value == -1)
10181 adding_value = 1;
10182 else if (adding_value != 1)
10183 {
10184 as_bad (_("must specify extensions to add before specifying "
10185 "those to remove"));
10186 return false;
10187 }
10188 }
10189
10190 if (optlen == 0)
10191 {
10192 as_bad (_("missing architectural extension"));
10193 return 0;
10194 }
10195
10196 gas_assert (adding_value != -1);
10197
10198 for (opt = aarch64_features; opt->name != NULL; opt++)
10199 if (strncmp (opt->name, str, optlen) == 0)
10200 {
10201 aarch64_feature_set set;
10202
10203 /* Add or remove the extension. */
10204 if (adding_value)
10205 {
10206 set = aarch64_feature_enable_set (opt->value);
10207 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10208 }
10209 else
10210 {
10211 set = aarch64_feature_disable_set (opt->value);
10212 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10213 }
10214 break;
10215 }
10216
10217 if (opt->name == NULL)
10218 {
10219 as_bad (_("unknown architectural extension `%s'"), str);
10220 return 0;
10221 }
10222
10223 str = ext;
10224 };
10225
10226 return 1;
10227 }
10228
10229 static int
10230 aarch64_parse_cpu (const char *str)
10231 {
10232 const struct aarch64_cpu_option_table *opt;
10233 const char *ext = strchr (str, '+');
10234 size_t optlen;
10235
10236 if (ext != NULL)
10237 optlen = ext - str;
10238 else
10239 optlen = strlen (str);
10240
10241 if (optlen == 0)
10242 {
10243 as_bad (_("missing cpu name `%s'"), str);
10244 return 0;
10245 }
10246
10247 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10248 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10249 {
10250 mcpu_cpu_opt = &opt->value;
10251 if (ext != NULL)
10252 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10253
10254 return 1;
10255 }
10256
10257 as_bad (_("unknown cpu `%s'"), str);
10258 return 0;
10259 }
10260
10261 static int
10262 aarch64_parse_arch (const char *str)
10263 {
10264 const struct aarch64_arch_option_table *opt;
10265 const char *ext = strchr (str, '+');
10266 size_t optlen;
10267
10268 if (ext != NULL)
10269 optlen = ext - str;
10270 else
10271 optlen = strlen (str);
10272
10273 if (optlen == 0)
10274 {
10275 as_bad (_("missing architecture name `%s'"), str);
10276 return 0;
10277 }
10278
10279 for (opt = aarch64_archs; opt->name != NULL; opt++)
10280 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10281 {
10282 march_cpu_opt = &opt->value;
10283 if (ext != NULL)
10284 return aarch64_parse_features (ext, &march_cpu_opt, false);
10285
10286 return 1;
10287 }
10288
10289 as_bad (_("unknown architecture `%s'\n"), str);
10290 return 0;
10291 }
10292
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* ABIs selectable with -mabi=.  ELF targets offer ilp32/lp64; other
   object formats (the #else branch) offer only llp64.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10308
10309 static int
10310 aarch64_parse_abi (const char *str)
10311 {
10312 unsigned int i;
10313
10314 if (str[0] == '\0')
10315 {
10316 as_bad (_("missing abi name `%s'"), str);
10317 return 0;
10318 }
10319
10320 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10321 if (strcmp (str, aarch64_abis[i].name) == 0)
10322 {
10323 aarch64_abi = aarch64_abis[i].value;
10324 return 1;
10325 }
10326
10327 as_bad (_("unknown abi `%s'\n"), str);
10328 return 0;
10329 }
10330
/* "-mfoo=" style options and their sub-option parsers; dispatched from
   the default case of md_parse_option.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10340
/* gas hook: handle one target-specific command-line option.  C is the
   option character and ARG its argument (or NULL).  Return 1 if the
   option was recognized and consumed, 0 otherwise.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple flag options (aarch64_opts)...  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* ...then the "-mfoo=" options (aarch64_long_opts).  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10407
10408 void
10409 md_show_usage (FILE * fp)
10410 {
10411 struct aarch64_option_table *opt;
10412 struct aarch64_long_option_table *lopt;
10413
10414 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10415
10416 for (opt = aarch64_opts; opt->option != NULL; opt++)
10417 if (opt->help != NULL)
10418 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10419
10420 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10421 if (lopt->help != NULL)
10422 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10423
10424 #ifdef OPTION_EB
10425 fprintf (fp, _("\
10426 -EB assemble code for a big-endian cpu\n"));
10427 #endif
10428
10429 #ifdef OPTION_EL
10430 fprintf (fp, _("\
10431 -EL assemble code for a little-endian cpu\n"));
10432 #endif
10433 }
10434
10435 /* Parse a .cpu directive. */
10436
10437 static void
10438 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10439 {
10440 const struct aarch64_cpu_option_table *opt;
10441 char saved_char;
10442 char *name;
10443 char *ext;
10444 size_t optlen;
10445
10446 name = input_line_pointer;
10447 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10448 saved_char = *input_line_pointer;
10449 *input_line_pointer = 0;
10450
10451 ext = strchr (name, '+');
10452
10453 if (ext != NULL)
10454 optlen = ext - name;
10455 else
10456 optlen = strlen (name);
10457
10458 /* Skip the first "all" entry. */
10459 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10460 if (strlen (opt->name) == optlen
10461 && strncmp (name, opt->name, optlen) == 0)
10462 {
10463 mcpu_cpu_opt = &opt->value;
10464 if (ext != NULL)
10465 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10466 return;
10467
10468 cpu_variant = *mcpu_cpu_opt;
10469
10470 *input_line_pointer = saved_char;
10471 demand_empty_rest_of_line ();
10472 return;
10473 }
10474 as_bad (_("unknown cpu `%s'"), name);
10475 *input_line_pointer = saved_char;
10476 ignore_rest_of_line ();
10477 }
10478
10479
10480 /* Parse a .arch directive. */
10481
10482 static void
10483 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10484 {
10485 const struct aarch64_arch_option_table *opt;
10486 char saved_char;
10487 char *name;
10488 char *ext;
10489 size_t optlen;
10490
10491 name = input_line_pointer;
10492 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10493 saved_char = *input_line_pointer;
10494 *input_line_pointer = 0;
10495
10496 ext = strchr (name, '+');
10497
10498 if (ext != NULL)
10499 optlen = ext - name;
10500 else
10501 optlen = strlen (name);
10502
10503 /* Skip the first "all" entry. */
10504 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10505 if (strlen (opt->name) == optlen
10506 && strncmp (name, opt->name, optlen) == 0)
10507 {
10508 mcpu_cpu_opt = &opt->value;
10509 if (ext != NULL)
10510 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10511 return;
10512
10513 cpu_variant = *mcpu_cpu_opt;
10514
10515 *input_line_pointer = saved_char;
10516 demand_empty_rest_of_line ();
10517 return;
10518 }
10519
10520 as_bad (_("unknown architecture `%s'\n"), name);
10521 *input_line_pointer = saved_char;
10522 ignore_rest_of_line ();
10523 }
10524
10525 /* Parse a .arch_extension directive. */
10526
10527 static void
10528 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10529 {
10530 char saved_char;
10531 char *ext = input_line_pointer;
10532
10533 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10534 saved_char = *input_line_pointer;
10535 *input_line_pointer = 0;
10536
10537 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10538 return;
10539
10540 cpu_variant = *mcpu_cpu_opt;
10541
10542 *input_line_pointer = saved_char;
10543 demand_empty_rest_of_line ();
10544 }
10545
/* Copy symbol information.  */

/* gas hook: copy the AArch64-specific symbol flag word from SRC to
   DEST (used when one symbol is aliased to another).  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10553
10554 #ifdef OBJ_ELF
10555 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10556 This is needed so AArch64 specific st_other values can be independently
10557 specified for an IFUNC resolver (that is called by the dynamic linker)
10558 and the symbol it resolves (aliased to the resolver). In particular,
10559 if a function symbol has special st_other value set via directives,
10560 then attaching an IFUNC resolver to that symbol should not override
10561 the st_other setting. Requiring the directive on the IFUNC resolver
10562 symbol would be unexpected and problematic in C code, where the two
10563 symbols appear as two independent function declarations. */
10564
10565 void
10566 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10567 {
10568 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10569 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10570 /* If size is unset, copy size from src. Because we don't track whether
10571 .size has been used, we can't differentiate .size dest, 0 from the case
10572 where dest's size is unset. */
10573 if (!destelf->size && S_GET_SIZE (dest) == 0)
10574 {
10575 if (srcelf->size)
10576 {
10577 destelf->size = XNEW (expressionS);
10578 *destelf->size = *srcelf->size;
10579 }
10580 S_SET_SIZE (dest, S_GET_SIZE (src));
10581 }
10582 }
10583 #endif